diff --git "a/codeparrot-valid_1009.txt" "b/codeparrot-valid_1009.txt" new file mode 100644--- /dev/null +++ "b/codeparrot-valid_1009.txt" @@ -0,0 +1,10000 @@ + country = "??" + nl.append(NodeLocation(dc["DATACENTERID"], + dc["LOCATION"], + country, + self)) + return nl + + def linode_set_datacenter(self, dc): + """ + Set the default datacenter for Linode creation + + Since Linodes must be created in a facility, this function sets the + default that :class:`create_node` will use. If a location keyword is + not passed to :class:`create_node`, this method must have already been + used. + + :keyword dc: the datacenter to create Linodes in unless specified + :type dc: :class:`NodeLocation` + + :rtype: ``bool`` + """ + did = dc.id + params = {"api_action": "avail.datacenters"} + data = self.connection.request(API_ROOT, params=params).objects[0] + for datacenter in data: + if did == dc["DATACENTERID"]: + self.datacenter = did + return + + dcs = ", ".join([d["DATACENTERID"] for d in data]) + self.datacenter = None + raise LinodeException(0xFD, "Invalid datacenter (use one of %s)" % dcs) + + def destroy_volume(self, volume): + """ + Destroys disk volume for the Linode. Linode id is to be provided as + extra["LinodeId"] whithin :class:`StorageVolume`. It can be retrieved + by :meth:`libcloud.compute.drivers.linode.LinodeNodeDriver\ + .ex_list_volumes`. + + :param volume: Volume to be destroyed + :type volume: :class:`StorageVolume` + + :rtype: ``bool`` + """ + if not isinstance(volume, StorageVolume): + raise LinodeException(0xFD, "Invalid volume instance") + + if volume.extra["LINODEID"] is None: + raise LinodeException(0xFD, "Missing LinodeID") + + params = { + "api_action": "linode.disk.delete", + "LinodeID": volume.extra["LINODEID"], + "DiskID": volume.id, + } + self.connection.request(API_ROOT, params=params) + + return True + + def ex_create_volume(self, size, name, node, fs_type): + """ + Create disk for the Linode. + + :keyword size: Size of volume in megabytes (required) + :type size: ``int`` + + :keyword name: Name of the volume to be created + :type name: ``str`` + + :keyword node: Node to attach volume to. + :type node: :class:`Node` + + :keyword fs_type: The formatted type of this disk. Valid types are: + ext3, ext4, swap, raw + :type fs_type: ``str`` + + + :return: StorageVolume representing the newly-created volume + :rtype: :class:`StorageVolume` + """ + # check node + if not isinstance(node, Node): + raise LinodeException(0xFD, "Invalid node instance") + + # check space available + total_space = node.extra['TOTALHD'] + existing_volumes = self.ex_list_volumes(node) + used_space = 0 + for volume in existing_volumes: + used_space = used_space + volume.size + + available_space = total_space - used_space + if available_space < size: + raise LinodeException(0xFD, "Volume size too big. 
Available space\ + %d" % available_space) + + # check filesystem type + if fs_type not in self._linode_disk_filesystems: + raise LinodeException(0xFD, "Not valid filesystem type") + + params = { + "api_action": "linode.disk.create", + "LinodeID": node.id, + "Label": name, + "Type": fs_type, + "Size": size + } + data = self.connection.request(API_ROOT, params=params).objects[0] + volume = data["DiskID"] + # Make a volume out of it and hand it back + params = { + "api_action": "linode.disk.list", + "LinodeID": node.id, + "DiskID": volume + } + data = self.connection.request(API_ROOT, params=params).objects[0] + return self._to_volumes(data)[0] + + def ex_list_volumes(self, node, disk_id=None): + """ + List existing disk volumes for for given Linode. + + :keyword node: Node to list disk volumes for. (required) + :type node: :class:`Node` + + :keyword disk_id: Id for specific disk volume. (optional) + :type disk_id: ``int`` + + :rtype: ``list`` of :class:`StorageVolume` + """ + if not isinstance(node, Node): + raise LinodeException(0xFD, "Invalid node instance") + + params = { + "api_action": "linode.disk.list", + "LinodeID": node.id + } + # Add param if disk_id was specified + if disk_id is not None: + params["DiskID"] = disk_id + + data = self.connection.request(API_ROOT, params=params).objects[0] + return self._to_volumes(data) + + def _to_volumes(self, objs): + """ + Covert returned JSON volumes into StorageVolume instances + + :keyword objs: ``list`` of JSON dictionaries representing the + StorageVolumes + :type objs: ``list`` + + :return: ``list`` of :class:`StorageVolume`s + """ + volumes = {} + for o in objs: + vid = o["DISKID"] + volumes[vid] = vol = StorageVolume(id=vid, name=o["LABEL"], + size=int(o["SIZE"]), + driver=self.connection.driver) + vol.extra = copy(o) + return list(volumes.values()) + + def _to_nodes(self, objs): + """Convert returned JSON Linodes into Node instances + + :keyword objs: ``list`` of JSON dictionaries representing the Linodes + :type objs: ``list`` + :return: ``list`` of :class:`Node`s""" + + # Get the IP addresses for the Linodes + nodes = {} + batch = [] + for o in objs: + lid = o["LINODEID"] + nodes[lid] = n = Node(id=lid, name=o["LABEL"], public_ips=[], + private_ips=[], + state=self.LINODE_STATES[o["STATUS"]], + driver=self.connection.driver) + n.extra = copy(o) + n.extra["PLANID"] = self._linode_plan_ids.get(o.get("TOTALRAM")) + batch.append({"api_action": "linode.ip.list", "LinodeID": lid}) + + # Avoid batch limitation + ip_answers = [] + args = [iter(batch)] * 25 + + if PY3: + izip_longest = itertools.zip_longest # pylint: disable=no-member + else: + izip_longest = getattr(itertools, 'izip_longest', _izip_longest) + + for twenty_five in izip_longest(*args): + twenty_five = [q for q in twenty_five if q] + params = {"api_action": "batch", + "api_requestArray": json.dumps(twenty_five)} + req = self.connection.request(API_ROOT, params=params) + if not req.success() or len(req.objects) == 0: + return None + ip_answers.extend(req.objects) + + # Add the returned IPs to the nodes and return them + for ip_list in ip_answers: + for ip in ip_list: + lid = ip["LINODEID"] + which = nodes[lid].public_ips if ip["ISPUBLIC"] == 1 else\ + nodes[lid].private_ips + which.append(ip["IPADDRESS"]) + return list(nodes.values()) + + +class LinodeNodeDriverV4(LinodeNodeDriver): + + connectionCls = LinodeConnectionV4 + _linode_disk_filesystems = LINODE_DISK_FILESYSTEMS_V4 + + LINODE_STATES = { + 'running': NodeState.RUNNING, + 'stopped': NodeState.STOPPED, + 'provisioning': 
NodeState.STARTING, + 'offline': NodeState.STOPPED, + 'booting': NodeState.STARTING, + 'rebooting': NodeState.REBOOTING, + 'shutting_down': NodeState.STOPPING, + 'deleting': NodeState.PENDING, + 'migrating': NodeState.MIGRATING, + 'rebuilding': NodeState.UPDATING, + 'cloning': NodeState.MIGRATING, + 'restoring': NodeState.PENDING, + 'resizing': NodeState.RECONFIGURING + } + + LINODE_DISK_STATES = { + 'ready': StorageVolumeState.AVAILABLE, + 'not ready': StorageVolumeState.CREATING, + 'deleting': StorageVolumeState.DELETING + } + + LINODE_VOLUME_STATES = { + 'creating': StorageVolumeState.CREATING, + 'active': StorageVolumeState.AVAILABLE, + 'resizing': StorageVolumeState.UPDATING, + 'contact_support': StorageVolumeState.UNKNOWN + } + + def list_nodes(self): + """ + Returns a list of Linodes the API key in use has access + to view. + + :return: List of node objects + :rtype: ``list`` of :class:`Node` + """ + + data = self._paginated_request('/v4/linode/instances', 'data') + return [self._to_node(obj) for obj in data] + + def list_sizes(self): + """ + Returns a list of Linode Types + + : rtype: ``list`` of :class: `NodeSize` + """ + data = self._paginated_request('/v4/linode/types', 'data') + return [self._to_size(obj) for obj in data] + + def list_images(self): + """ + Returns a list of images + + :rtype: ``list`` of :class:`NodeImage` + """ + data = self._paginated_request('/v4/images', 'data') + return [self._to_image(obj) for obj in data] + + def list_locations(self): + """ + Lists the Regions available for Linode services + + :rtype: ``list`` of :class:`NodeLocation` + """ + data = self._paginated_request('/v4/regions', 'data') + return [self._to_location(obj) for obj in data] + + def start_node(self, node): + """Boots a node the API Key has permission to modify + + :param node: the node to start + :type node: :class:`Node` + + :rtype: ``bool`` + """ + if not isinstance(node, Node): + raise LinodeExceptionV4("Invalid node instance") + + response = self.connection.request('/v4/linode/instances/%s/boot' + % node.id, + method='POST') + return response.status == httplib.OK + + def ex_start_node(self, node): + # NOTE: This method is here for backward compatibility reasons after + # this method was promoted to be part of the standard compute API in + # Libcloud v2.7.0 + return self.start_node(node=node) + + def stop_node(self, node): + """Shuts down a a node the API Key has permission to modify. + + :param node: the Linode to destroy + :type node: :class:`Node` + + :rtype: ``bool`` + """ + if not isinstance(node, Node): + raise LinodeExceptionV4("Invalid node instance") + + response = self.connection.request('/v4/linode/instances/%s/shutdown' + % node.id, + method='POST') + return response.status == httplib.OK + + def ex_stop_node(self, node): + # NOTE: This method is here for backward compatibility reasons after + # this method was promoted to be part of the standard compute API in + # Libcloud v2.7.0 + return self.stop_node(node=node) + + def destroy_node(self, node): + """Deletes a node the API Key has permission to `read_write` + + :param node: the Linode to destroy + :type node: :class:`Node` + + :rtype: ``bool`` + """ + if not isinstance(node, Node): + raise LinodeExceptionV4("Invalid node instance") + + response = self.connection.request('/v4/linode/instances/%s' + % node.id, + method='DELETE') + return response.status == httplib.OK + + def reboot_node(self, node): + """Reboots a node the API Key has permission to modify. 
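+
+        A minimal usage sketch (``driver`` is assumed to be an authenticated
+        ``LinodeNodeDriverV4`` instance and ``node`` an item returned by
+        ``driver.list_nodes()``)::
+
+            node = driver.list_nodes()[0]
+            rebooted = driver.reboot_node(node)  # True when the API returns OK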
+ + :param node: the Linode to destroy + :type node: :class:`Node` + + :rtype: ``bool`` + """ + if not isinstance(node, Node): + raise LinodeExceptionV4("Invalid node instance") + + response = self.connection.request('/v4/linode/instances/%s/reboot' + % node.id, + method='POST') + return response.status == httplib.OK + + def create_node(self, location, size, image=None, + name=None, root_pass=None, ex_authorized_keys=None, + ex_authorized_users=None, ex_tags=None, + ex_backups_enabled=False, ex_private_ip=False): + """Creates a Linode Instance. + In order for this request to complete successfully, + the user must have the `add_linodes` grant as this call + will incur a charge. + + :param location: which region to create the node in + :type location: :class:`NodeLocation` + + :param size: the plan size to create + :type size: :class:`NodeSize` + + :keyword image: which distribution to deploy on the node + :type image: :class:`NodeImage` + + :keyword name: the name to assign to node.\ + Must start with an alpha character.\ + May only consist of alphanumeric characters,\ + dashes (-), underscores (_) or periods (.).\ + Cannot have two dashes (--), underscores (__) or periods (..) in a row. + :type name: ``str`` + + :keyword root_pass: the root password (required if image is provided) + :type root_pass: ``str`` + + :keyword ex_authorized_keys: a list of public SSH keys + :type ex_authorized_keys: ``list`` of ``str`` + + :keyword ex_authorized_users: a list of usernames.\ + If the usernames have associated SSH keys,\ + the keys will be appended to the root users `authorized_keys` + :type ex_authorized_users: ``list`` of ``str`` + + :keyword ex_tags: list of tags for the node + :type ex_tags: ``list`` of ``str`` + + :keyword ex_backups_enabled: whether to be enrolled \ + in the Linode Backup service (False) + :type ex_backups_enabled: ``bool`` + + :keyword ex_private_ip: whether or not to request a private IP + :type ex_private_ip: ``bool`` + + :return: Node representing the newly-created node + :rtype: :class:`Node` + """ + + if not isinstance(location, NodeLocation): + raise LinodeExceptionV4("Invalid location instance") + + if not isinstance(size, NodeSize): + raise LinodeExceptionV4("Invalid size instance") + + attr = {'region': location.id, + 'type': size.id, + 'private_ip': ex_private_ip, + 'backups_enabled': ex_backups_enabled, + } + + if image is not None: + if root_pass is None: + raise LinodeExceptionV4("root password required " + "when providing an image") + attr['image'] = image.id + attr['root_pass'] = root_pass + + if name is not None: + valid_name = r'^[a-zA-Z]((?!--|__|\.\.)[a-zA-Z0-9-_.])+$' + if not re.match(valid_name, name): + raise LinodeExceptionV4("Invalid name") + attr['label'] = name + if ex_authorized_keys is not None: + attr['authorized_keys'] = list(ex_authorized_keys) + if ex_authorized_users is not None: + attr['authorized_users'] = list(ex_authorized_users) + if ex_tags is not None: + attr['tags'] = list(ex_tags) + + response = self.connection.request('/v4/linode/instances', + data=json.dumps(attr), + method='POST').object + return self._to_node(response) + + def ex_get_node(self, node_id): + """ + Return a Node object based on a node ID. + + :keyword node_id: Node's ID + :type node_id: ``str`` + + :return: Created node + :rtype : :class:`Node` + """ + response = self.connection.request('/v4/linode/instances/%s' + % node_id).object + return self._to_node(response) + + def ex_list_disks(self, node): + """ + List disks associated with the node. 
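+
+        Illustrative sketch (``driver`` and ``node`` assumed as in the other
+        examples; prints a few attributes of each returned disk)::
+
+            for disk in driver.ex_list_disks(node):
+                print(disk.id, disk.name, disk.size, disk.filesystem)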
+ + :param node: Node to list disks. (required) + :type node: :class:`Node` + + :rtype: ``list`` of :class:`LinodeDisk` + """ + if not isinstance(node, Node): + raise LinodeExceptionV4("Invalid node instance") + + data = self._paginated_request('/v4/linode/instances/%s/disks' + % node.id, 'data') + + return [self._to_disk(obj) for obj in data] + + def ex_create_disk(self, size, name, node, fs_type, + image=None, ex_root_pass=None, ex_authorized_keys=None, + ex_authorized_users=None, ex_read_only=False): + """ + Adds a new disk to node + + :param size: Size of disk in megabytes (required) + :type size: ``int`` + + :param name: Name of the disk to be created (required) + :type name: ``str`` + + :param node: Node to attach disk to (required) + :type node: :class:`Node` + + :param fs_type: The formatted type of this disk. Valid types are: + ext3, ext4, swap, raw, initrd + :type fs_type: ``str`` + + :keyword image: Image to deploy the volume from + :type image: :class:`NodeImage` + + :keyword ex_root_pass: root password,required \ + if an image is provided + :type ex_root_pass: ``str`` + + :keyword ex_authorized_keys: a list of SSH keys + :type ex_authorized_keys: ``list`` of ``str`` + + :keyword ex_authorized_users: a list of usernames \ + that will have their SSH keys,\ + if any, automatically appended \ + to the root user's ~/.ssh/authorized_keys file. + :type ex_authorized_users: ``list`` of ``str`` + + :keyword ex_read_only: if true, this disk is read-only + :type ex_read_only: ``bool`` + + :return: LinodeDisk representing the newly-created disk + :rtype: :class:`LinodeDisk` + """ + + attr = {'label': str(name), + 'size': int(size), + 'filesystem': fs_type, + 'read_only': ex_read_only} + + if not isinstance(node, Node): + raise LinodeExceptionV4("Invalid node instance") + + if fs_type not in self._linode_disk_filesystems: + raise LinodeExceptionV4("Not valid filesystem type") + + if image is not None: + if not isinstance(image, NodeImage): + raise LinodeExceptionV4("Invalid image instance") + # when an image is set, root pass must be set as well + if ex_root_pass is None: + raise LinodeExceptionV4("root_pass is required when " + "deploying an image") + attr['image'] = image.id + attr['root_pass'] = ex_root_pass + + if ex_authorized_keys is not None: + attr['authorized_keys'] = list(ex_authorized_keys) + + if ex_authorized_users is not None: + attr['authorized_users'] = list(ex_authorized_users) + + response = self.connection.request('/v4/linode/instances/%s/disks' + % node.id, + data=json.dumps(attr), + method='POST').object + return self._to_disk(response) + + def ex_destroy_disk(self, node, disk): + """ + Destroys disk for the given node. + + :param node: The Node the disk is attached to. 
(required) + :type node: :class:`Node` + + :param disk: LinodeDisk to be destroyed (required) + :type disk: :class:`LinodeDisk` + + :rtype: ``bool`` + """ + if not isinstance(node, Node): + raise LinodeExceptionV4("Invalid node instance") + + if not isinstance(disk, LinodeDisk): + raise LinodeExceptionV4("Invalid disk instance") + + if node.state != self.LINODE_STATES['stopped']: + raise LinodeExceptionV4("Node needs to be stopped" + " before disk is destroyed") + + response = self.connection.request('/v4/linode/instances/%s/disks/%s' + % (node.id, disk.id), + method='DELETE') + return response.status == httplib.OK + + def list_volumes(self): + """Get all volumes of the account + :rtype: `list` of :class: `StorageVolume` + """ + data = self._paginated_request('/v4/volumes', 'data') + + return [self._to_volume(obj) for obj in data] + + def create_volume(self, name, size, location=None, node=None, tags=None): + """Creates a volume and optionally attaches it to a node. + + :param name: The name to be given to volume (required).\ + Must start with an alpha character. \ + May only consist of alphanumeric characters,\ + dashes (-), underscores (_)\ + Cannot have two dashes (--), underscores (__) in a row. + + :type name: `str` + + :param size: Size in gigabytes (required) + :type size: `int` + + :keyword location: Location to create the node.\ + Required if node is not given. + :type location: :class:`NodeLocation` + + :keyword volume: Node to attach the volume to + :type volume: :class:`Node` + + :keyword tags: tags to apply to volume + :type tags: `list` of `str` + + :rtype: :class: `StorageVolume` + """ + + valid_name = '^[a-zA-Z]((?!--|__)[a-zA-Z0-9-_])+$' + if not re.match(valid_name, name): + raise LinodeExceptionV4("Invalid name") + + attr = { + 'label': name, + 'size': int(size), + } + + if node is not None: + if not isinstance(node, Node): + raise LinodeExceptionV4("Invalid node instance") + attr['linode_id'] = int(node.id) + else: + # location is only required if a node is not given + if location: + if not isinstance(location, NodeLocation): + raise LinodeExceptionV4("Invalid location instance") + attr['region'] = location.id + else: + raise LinodeExceptionV4("Region must be provided " + "when node is not") + if tags is not None: + attr['tags'] = list(tags) + + response = self.connection.request('/v4/volumes', + data=json.dumps(attr), + method='POST').object + return self._to_volume(response) + + def attach_volume(self, node, volume, persist_across_boots=True): + """Attaches a volume to a node. 
+ Volume and node must be located in the same region + + :param node: Node to attach the volume to(required) + :type node: :class:`Node` + + :param volume: Volume to be attached (required) + :type volume: :class:`StorageVolume` + + :keyword persist_across_boots: Wether volume should be \ + attached to node across boots + :type persist_across_boots: `bool` + + :rtype: :class: `StorageVolume` + """ + if not isinstance(volume, StorageVolume): + raise LinodeExceptionV4("Invalid volume instance") + + if not isinstance(node, Node): + raise LinodeExceptionV4("Invalid node instance") + + if volume.extra['linode_id'] is not None: + raise LinodeExceptionV4("Volume is already attached to a node") + + if node.extra['location'] != volume.extra['location']: + raise LinodeExceptionV4("Volume and node " + "must be on the same region") + + attr = { + 'linode_id': int(node.id), + 'persist_across_boots': persist_across_boots + } + + response = self.connection.request('/v4/volumes/%s/attach' + % volume.id, + data=json.dumps(attr), + method='POST').object + return self._to_volume(response) + + def detach_volume(self, volume): + """Detaches a volume from a node. + + :param volume: Volume to be detached (required) + :type volume: :class:`StorageVolume` + + :rtype: ``bool`` + """ + if not isinstance(volume, StorageVolume): + raise LinodeExceptionV4("Invalid volume instance") + + if volume.extra['linode_id'] is None: + raise LinodeExceptionV4("Volume is already detached") + + response = self.connection.request('/v4/volumes/%s/detach' + % volume.id, + method='POST') + return response.status == httplib.OK + + def destroy_volume(self, volume): + """Destroys the volume given. + + :param volume: Volume to be deleted (required) + :type volume: :class:`StorageVolume` + + :rtype: ``bool`` + """ + if not isinstance(volume, StorageVolume): + raise LinodeExceptionV4("Invalid volume instance") + + if volume.extra['linode_id'] is not None: + raise LinodeExceptionV4("Volume must be detached" + " before it can be deleted.") + response = self.connection.request('/v4/volumes/%s' + % volume.id, + method='DELETE') + return response.status == httplib.OK + + def ex_resize_volume(self, volume, size): + """Resizes the volume given. + + :param volume: Volume to be resized + :type volume: :class:`StorageVolume` + + :param size: new volume size in gigabytes, must be\ + greater than current size + :type size: `int` + + :rtype: ``bool`` + """ + if not isinstance(volume, StorageVolume): + raise LinodeExceptionV4("Invalid volume instance") + + if volume.size >= size: + raise LinodeExceptionV4("Volumes can only be resized up") + attr = { + 'size': size + } + + response = self.connection.request('/v4/volumes/%s/resize' + % volume.id, + data=json.dumps(attr), + method='POST') + return response.status == httplib.OK + + def ex_clone_volume(self, volume, name): + """Clones the volume given + + :param volume: Volume to be cloned + :type volume: :class:`StorageVolume` + + :param name: new cloned volume name + :type name: `str` + + :rtype: :class:`StorageVolume` + """ + + if not isinstance(volume, StorageVolume): + raise LinodeExceptionV4("Invalid volume instance") + + attr = { + 'label': name + } + response = self.connection.request('/v4/volumes/%s/clone' + % volume.id, + data=json.dumps(attr), + method='POST').object + + return self._to_volume(response) + + def ex_get_volume(self, volume_id): + """ + Return a Volume object based on a volume ID. 
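+
+        Sketch (the volume id below is a made-up placeholder)::
+
+            volume = driver.ex_get_volume('12345')
+            print(volume.name, volume.size, volume.extra['state'])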
+ + :param volume_id: Volume's id + :type volume_id: ``str`` + + :return: A StorageVolume object for the volume + :rtype: :class:`StorageVolume` + """ + response = self.connection.request('/v4/volumes/%s' + % volume_id).object + return self._to_volume(response) + + def create_image(self, disk, name=None, description=None): + """Creates a private image from a LinodeDisk. + Images are limited to three per account. + + :param disk: LinodeDisk to create the image from (required) + :type disk: :class:`LinodeDisk` + + :keyword name: A name for the image.\ + Defaults to the name of the disk \ + it is being created from if not provided + :type name: `str` + + :keyword description: A description of the image + :type description: `str` + + :return: The newly created NodeImage + :rtype: :class:`NodeImage` + """ + + if not isinstance(disk, LinodeDisk): + raise LinodeExceptionV4("Invalid disk instance") + + attr = { + 'disk_id': int(disk.id), + 'label': name, + 'description': description + } + + response = self.connection.request('/v4/images', + data=json.dumps(attr), + method='POST').object + return self._to_image(response) + + def delete_image(self, image): + """Deletes a private image + + :param image: NodeImage to delete (required) + :type image: :class:`NodeImage` + + :rtype: ``bool`` + """ + if not isinstance(image, NodeImage): + raise LinodeExceptionV4("Invalid image instance") + + response = self.connection.request('/v4/images/%s' + % image.id, + method='DELETE') + return response.status == httplib.OK + + def ex_list_addresses(self): + """List IP addresses + + :return: LinodeIPAddress list + :rtype: `list` of :class:`LinodeIPAddress` + """ + data = self._paginated_request('/v4/networking/ips', 'data') + + return [self._to_address(obj) for obj in data] + + def ex_list_node_addresses(self, node): + """List all IPv4 addresses attached to node + + :param node: Node to list IP addresses + :type node: :class:`Node` + + :return: LinodeIPAddress list + :rtype: `list` of :class:`LinodeIPAddress` + """ + if not isinstance(node, Node): + raise LinodeExceptionV4("Invalid node instance") + + response = self.connection.request('/v4/linode/instances/%s/ips' + % node.id).object + return self._to_addresses(response) + + def ex_allocate_private_address(self, node, address_type='ipv4'): + """Allocates a private IPv4 address to node.Only ipv4 is currently supported + + :param node: Node to attach the IP address + :type node: :class:`Node` + + :keyword address_type: Type of IP address + :type address_type: `str` + + :return: The newly created LinodeIPAddress + :rtype: :class:`LinodeIPAddress` + """ + if not isinstance(node, Node): + raise LinodeExceptionV4("Invalid node instance") + + # Only ipv4 is currently supported + if address_type != 'ipv4': + raise LinodeExceptionV4("Address type not supported") + # Only one private IP address can be allocated + if len(node.private_ips) >= 1: + raise LinodeExceptionV4("Nodes can have up to one private IP") + + attr = { + 'public': False, + 'type': address_type + } + + response = self.connection.request('/v4/linode/instances/%s/ips' + % node.id, + data=json.dumps(attr), + method='POST').object + return self._to_address(response) + + def ex_share_address(self, node, addresses): + """Shares an IP with another node.This can be used to allow one Linode + to begin serving requests should another become unresponsive. 
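+
+        Rough sketch (``primary`` and ``standby`` are assumed to be existing
+        :class:`Node` objects in the same region)::
+
+            addresses = driver.ex_list_node_addresses(primary)
+            driver.ex_share_address(standby, addresses)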
+ + :param node: Node to share the IP addresses with + :type node: :class:`Node` + + :keyword addresses: List of IP addresses to share + :type address_type: `list` of :class: `LinodeIPAddress` + + :rtype: ``bool`` + """ + if not isinstance(node, Node): + raise LinodeExceptionV4("Invalid node instance") + + if not all(isinstance(address, LinodeIPAddress) + for address in addresses): + raise LinodeExceptionV4("Invalid address instance") + + attr = { + 'ips': [address.inet for address in addresses], + 'linode_id': int(node.id) + } + response = self.connection.request('/v4/networking/ipv4/share', + data=json.dumps(attr), + method='POST') + return response.status == httplib.OK + + def ex_resize_node(self, node, size, allow_auto_disk_resize=False): + """ + Resizes a node the API Key has read_write permission + to a different Type. + The following requirements must be met: + - The node must not have a pending migration + - The account cannot have an outstanding balance + - The node must not have more disk allocation than the new size allows + + :param node: the Linode to resize + :type node: :class:`Node` + + :param size: the size of the new node + :type size: :class:`NodeSize` + + :keyword allow_auto_disk_resize: Automatically resize disks \ + when resizing a node. + :type allow_auto_disk_resize: ``bool`` + + :rtype: ``bool`` + """ + if not isinstance(node, Node): + raise LinodeExceptionV4("Invalid node instance") + if not isinstance(size, NodeSize): + raise LinodeExceptionV4("Invalid node size") + + attr = {'type': size.id, + 'allow_auto_disk_resize': allow_auto_disk_resize} + + response = self.connection.request( + '/v4/linode/instances/%s/resize' % node.id, + data=json.dumps(attr), + method='POST') + + return response.status == httplib.OK + + def ex_rename_node(self, node, name): + """Renames a node + + :param node: the Linode to resize + :type node: :class:`Node` + + :param name: the node's new name + :type name: ``str`` + + :return: Changed Node + :rtype: :class:`Node` + """ + if not isinstance(node, Node): + raise LinodeExceptionV4("Invalid node instance") + + attr = {'label': name} + + response = self.connection.request( + '/v4/linode/instances/%s' % node.id, + data=json.dumps(attr), + method='PUT').object + + return self._to_node(response) + + def _to_node(self, data): + extra = { + 'tags': data['tags'], + 'location': data['region'], + 'ipv6': data['ipv6'], + 'hypervisor': data['hypervisor'], + 'specs': data['specs'], + 'alerts': data['alerts'], + 'backups': data['backups'], + 'watchdog_enabled': data['watchdog_enabled'] + } + + public_ips = [ip for ip in data['ipv4'] if not is_private_subnet(ip)] + private_ips = [ip for ip in data['ipv4'] if is_private_subnet(ip)] + return Node( + id=data['id'], + name=data['label'], + state=self.LINODE_STATES[data['status']], + public_ips=public_ips, + private_ips=private_ips, + driver=self, + size=data['type'], + image=data['image'], + created_at=self._to_datetime(data['created']), + extra=extra) + + def _to_datetime(self, strtime): + return datetime.strptime(strtime, "%Y-%m-%dT%H:%M:%S") + + def _to_size(self, data): + extra = { + 'class': data['class'], + 'monthly_price': data['price']['monthly'], + 'addons': data['addons'], + 'successor': data['successor'], + 'transfer': data['transfer'], + 'vcpus': data['vcpus'], + 'gpus': data['gpus'] + } + return NodeSize( + id=data['id'], + name=data['label'], + ram=data['memory'], + disk=data['disk'], + bandwidth=data['network_out'], + price=data['price']['hourly'], + driver=self, + extra=extra + ) + + def 
_to_image(self, data): + extra = { + 'type': data['type'], + 'description': data['description'], + 'created': self._to_datetime(data['created']), + 'created_by': data['created_by'], + 'is_public': data['is_public'], + 'size': data['size'], + 'eol': data['eol'], + 'vendor': data['vendor'], + } + return NodeImage( + id=data['id'], + name=data['label'], + driver=self, + extra=extra + ) + + def _to_location(self, data): + extra = { + 'status': data['status'], + 'capabilities': data['capabilities'], + 'resolvers': data['resolvers'] + } + return NodeLocation( + id=data['id'], + name=data['id'], + country=data['country'].upper(), + driver=self, + extra=extra) + + def _to_volume(self, data): + extra = { + 'created': self._to_datetime(data['created']), + 'tags': data['tags'], + 'location': data['region'], + 'linode_id': data['linode_id'], + 'linode_label': data['linode_label'], + 'state': self.LINODE_VOLUME_STATES[data['status']], + 'filesystem_path': data['filesystem_path'] + } + return StorageVolume( + id=str(data['id']), + name=data['label'], + size=data['size'], + driver=self, + extra=extra) + + def _to_disk(self, data): + return LinodeDisk( + id=data['id'], + state=self.LINODE_DISK_STATES[data['status']], + name=data['label'], + filesystem=data['filesystem'], + size=data['size'], + driver=self, + ) + + def _to_address(self, data): + extra = { + 'gateway': data['gateway'], + 'subnet_mask': data['subnet_mask'], + 'prefix': data['prefix'], + 'rdns': data['rdns'], + 'node_id': data['linode_id'], + 'region': data['region'], + } + return LinodeIPAddress( + inet=data['address'], + public=data['public'], + version=data['type'], + driver=self, + extra=extra + ) + + def _to_addresses(self, data): + addresses = data['ipv4']['public'] + data['ipv4']['private'] + return [self._to_address(address) for address in addresses] + + def _paginated_request(self, url, obj, params=None): + """ + Perform multiple calls in order to have a full list of elements when + the API responses are paginated. 
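+
+        Example of how the driver itself uses this helper (taken from
+        :meth:`list_nodes` above)::
+
+            data = self._paginated_request('/v4/linode/instances', 'data')
+            return [self._to_node(obj) for obj in data]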
+ + :param url: API endpoint + :type url: ``str`` + + :param obj: Result object key + :type obj: ``str`` + + :param params: Request parameters + :type params: ``dict`` + + :return: ``list`` of API response objects + :rtype: ``list`` + """ + objects = [] + params = params if params is not None else {} + + ret = self.connection.request(url, params=params).object + + data = list(ret.get(obj, [])) + current_page = int(ret.get('page', 1)) + num_of_pages = int(ret.get('pages', 1)) + objects.extend(data) + for page in range(current_page + 1, num_of_pages + 1): + # add param to request next page + params['page'] = page + ret = self.connection.request(url, params=params).object + data = list(ret.get(obj, [])) + objects.extend(data) + return objects + + +def _izip_longest(*args, **kwds): + """Taken from Python docs + + http://docs.python.org/library/itertools.html#itertools.izip + """ + + fillvalue = kwds.get('fillvalue') + + def sentinel(counter=([fillvalue] * (len(args) - 1)).pop): + yield counter() # yields the fillvalue, or raises IndexError + + fillers = itertools.repeat(fillvalue) + iters = [itertools.chain(it, sentinel(), fillers) for it in args] + try: + for tup in itertools.izip(*iters): # pylint: disable=no-member + yield tup + except IndexError: + pass + +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2016, René Moser +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +--- +module: cs_role +short_description: Manages user roles on Apache CloudStack based clouds. +description: + - Create, update, delete user roles. +version_added: '2.3' +author: René Moser (@resmo) +options: + name: + description: + - Name of the role. + type: str + required: true + id: + description: + - ID of the role. + - If provided, I(id) is used as key. + type: str + aliases: [ uuid ] + role_type: + description: + - Type of the role. + - Only considered for creation. + type: str + default: User + choices: [ User, DomainAdmin, ResourceAdmin, Admin ] + description: + description: + - Description of the role. + type: str + state: + description: + - State of the role. + type: str + default: present + choices: [ present, absent ] +extends_documentation_fragment: cloudstack +''' + +EXAMPLES = ''' +- name: Ensure an user role is present + cs_role: + name: myrole_user + delegate_to: localhost + +- name: Ensure a role having particular ID is named as myrole_user + cs_role: + name: myrole_user + id: 04589590-ac63-4ffc-93f5-b698b8ac38b6 + delegate_to: localhost + +- name: Ensure a role is absent + cs_role: + name: myrole_user + state: absent + delegate_to: localhost +''' + +RETURN = ''' +--- +id: + description: UUID of the role. + returned: success + type: str + sample: 04589590-ac63-4ffc-93f5-b698b8ac38b6 +name: + description: Name of the role. + returned: success + type: str + sample: myrole +description: + description: Description of the role. + returned: success + type: str + sample: "This is my role description" +role_type: + description: Type of the role. 
+ returned: success + type: str + sample: User +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.cloudstack import ( + AnsibleCloudStack, + cs_argument_spec, + cs_required_together, +) + + +class AnsibleCloudStackRole(AnsibleCloudStack): + + def __init__(self, module): + super(AnsibleCloudStackRole, self).__init__(module) + self.returns = { + 'type': 'role_type', + } + + def get_role(self): + uuid = self.module.params.get('uuid') + if uuid: + args = { + 'id': uuid, + } + roles = self.query_api('listRoles', **args) + if roles: + return roles['role'][0] + else: + args = { + 'name': self.module.params.get('name'), + } + roles = self.query_api('listRoles', **args) + if roles: + return roles['role'][0] + return None + + def present_role(self): + role = self.get_role() + if role: + role = self._update_role(role) + else: + role = self._create_role(role) + return role + + def _create_role(self, role): + self.result['changed'] = True + args = { + 'name': self.module.params.get('name'), + 'type': self.module.params.get('role_type'), + 'description': self.module.params.get('description'), + } + if not self.module.check_mode: + res = self.query_api('createRole', **args) + role = res['role'] + return role + + def _update_role(self, role): + args = { + 'id': role['id'], + 'name': self.module.params.get('name'), + 'description': self.module.params.get('description'), + } + if self.has_changed(args, role): + self.result['changed'] = True + if not self.module.check_mode: + res = self.query_api('updateRole', **args) + + # The API as in 4.9 does not return an updated role yet + if 'role' not in res: + role = self.get_role() + else: + role = res['role'] + return role + + def absent_role(self): + role = self.get_role() + if role: + self.result['changed'] = True + args = { + 'id': role['id'], + } + if not self.module.check_mode: + self.query_api('deleteRole', **args) + return role + + +def main(): + argument_spec = cs_argument_spec() + argument_spec.update(dict( + uuid=dict(aliases=['id']), + name=dict(required=True), + description=dict(), + role_type=dict(choices=['User', 'DomainAdmin', 'ResourceAdmin', 'Admin'], default='User'), + state=dict(choices=['present', 'absent'], default='present'), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=cs_required_together(), + supports_check_mode=True + ) + + acs_role = AnsibleCloudStackRole(module) + state = module.params.get('state') + if state == 'absent': + role = acs_role.absent_role() + else: + role = acs_role.present_role() + + result = acs_role.get_result(role) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() + +#!/usr/bin/env python + +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. + +import os +import subprocess +import sys +import BaseHTTPServer +import SimpleHTTPServer +import urlparse +import json + + +# Port to run the HTTP server on for Dromaeo. +TEST_SERVER_PORT = 8192 + + +# Run servo and print / parse the results for a specific Dromaeo module. 
+def run_servo(servo_exe, tests): + url = "http://localhost:{0}/dromaeo/web/?{1}&automated&post_json".format(TEST_SERVER_PORT, tests) + args = [servo_exe, url, "-z", "-f"] + return subprocess.Popen(args) + + +# Print usage if command line args are incorrect +def print_usage(): + print("USAGE: {0} tests servo_binary dromaeo_base_dir".format(sys.argv[0])) + + +# Handle the POST at the end +class RequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler): + def do_POST(self): + self.send_response(200) + self.end_headers() + self.wfile.write("POST OK.
") + length = int(self.headers.getheader('content-length')) + parameters = urlparse.parse_qs(self.rfile.read(length)) + self.server.got_post = True + self.server.post_data = parameters['data'] + + def log_message(self, format, *args): + return + + +if __name__ == '__main__': + if len(sys.argv) == 4: + tests = sys.argv[1] + servo_exe = sys.argv[2] + base_dir = sys.argv[3] + os.chdir(base_dir) + + # Ensure servo binary can be found + if not os.path.isfile(servo_exe): + print("Unable to find {0}. This script expects an existing build of Servo.".format(servo_exe)) + sys.exit(1) + + # Start the test server + server = BaseHTTPServer.HTTPServer(('', TEST_SERVER_PORT), RequestHandler) + + print("Testing Dromaeo on Servo!") + proc = run_servo(servo_exe, tests) + server.got_post = False + while not server.got_post: + server.handle_request() + data = json.loads(server.post_data[0]) + n = 0 + l = 0 + for test in data: + n = max(n, len(data[test])) + l = max(l, len(test)) + print("\n Test{0} | Time".format(" " * (l - len("Test")))) + print("-{0}-|-{1}-".format("-" * l, "-" * n)) + for test in data: + print(" {0}{1} | {2}".format(test, " " * (l - len(test)), data[test])) + proc.kill() + else: + print_usage() + +""" +Module containing single call export functions. +""" +#----------------------------------------------------------------------------- +# Copyright (c) 2013, the IPython Development Team. +# +# Distributed under the terms of the Modified BSD License. +# +# The full license is in the file COPYING.txt, distributed with this software. +#----------------------------------------------------------------------------- + +#----------------------------------------------------------------------------- +# Imports +#----------------------------------------------------------------------------- + +from functools import wraps + +from IPython.nbformat.v3.nbbase import NotebookNode +from IPython.utils.decorators import undoc +from IPython.utils.py3compat import string_types + +from .exporter import Exporter +from .templateexporter import TemplateExporter +from .html import HTMLExporter +from .slides import SlidesExporter +from .latex import LatexExporter +from .markdown import MarkdownExporter +from .python import PythonExporter +from .rst import RSTExporter + +#----------------------------------------------------------------------------- +# Classes +#----------------------------------------------------------------------------- + +@undoc +def DocDecorator(f): + + #Set docstring of function + f.__doc__ = f.__doc__ + """ + nb : :class:`~{nbnode_mod}.NotebookNode` + The notebook to export. + config : config (optional, keyword arg) + User configuration instance. + resources : dict (optional, keyword arg) + Resources used in the conversion process. + + Returns + ------- + tuple- output, resources, exporter_instance + output : str + Jinja 2 output. This is the resulting converted notebook. + resources : dictionary + Dictionary of resources used prior to and during the conversion + process. + exporter_instance : Exporter + Instance of the Exporter class used to export the document. Useful + to caller because it provides a 'file_extension' property which + specifies what extension the output should be saved as. 
+ + Notes + ----- + WARNING: API WILL CHANGE IN FUTURE RELEASES OF NBCONVERT + """.format(nbnode_mod=NotebookNode.__module__) + + @wraps(f) + def decorator(*args, **kwargs): + return f(*args, **kwargs) + + return decorator + + +#----------------------------------------------------------------------------- +# Functions +#----------------------------------------------------------------------------- + +__all__ = [ + 'export', + 'export_html', + 'export_custom', + 'export_slides', + 'export_latex', + 'export_markdown', + 'export_python', + 'export_rst', + 'export_by_name', + 'get_export_names', + 'ExporterNameError' +] + + +class ExporterNameError(NameError): + pass + +@DocDecorator +def export(exporter, nb, **kw): + """ + Export a notebook object using specific exporter class. + + Parameters + ---------- + exporter : class:`~IPython.nbconvert.exporters.exporter.Exporter` class or instance + Class type or instance of the exporter that should be used. If the + method initializes it's own instance of the class, it is ASSUMED that + the class type provided exposes a constructor (``__init__``) with the same + signature as the base Exporter class. + """ + + #Check arguments + if exporter is None: + raise TypeError("Exporter is None") + elif not isinstance(exporter, Exporter) and not issubclass(exporter, Exporter): + raise TypeError("exporter does not inherit from Exporter (base)") + if nb is None: + raise TypeError("nb is None") + + #Create the exporter + resources = kw.pop('resources', None) + if isinstance(exporter, Exporter): + exporter_instance = exporter + else: + exporter_instance = exporter(**kw) + + #Try to convert the notebook using the appropriate conversion function. + if isinstance(nb, NotebookNode): + output, resources = exporter_instance.from_notebook_node(nb, resources) + elif isinstance(nb, string_types): + output, resources = exporter_instance.from_filename(nb, resources) + else: + output, resources = exporter_instance.from_file(nb, resources) + return output, resources + +exporter_map = dict( + custom=TemplateExporter, + html=HTMLExporter, + slides=SlidesExporter, + latex=LatexExporter, + markdown=MarkdownExporter, + python=PythonExporter, + rst=RSTExporter, +) + +def _make_exporter(name, E): + """make an export_foo function from a short key and Exporter class E""" + def _export(nb, **kw): + return export(E, nb, **kw) + _export.__doc__ = """Export a notebook object to {0} format""".format(name) + return _export + +g = globals() + +for name, E in exporter_map.items(): + g['export_%s' % name] = DocDecorator(_make_exporter(name, E)) + +@DocDecorator +def export_by_name(format_name, nb, **kw): + """ + Export a notebook object to a template type by its name. Reflection + (Inspect) is used to find the template's corresponding explicit export + method defined in this module. That method is then called directly. + + Parameters + ---------- + format_name : str + Name of the template style to export to. + """ + + function_name = "export_" + format_name.lower() + + if function_name in globals(): + return globals()[function_name](nb, **kw) + else: + raise ExporterNameError("template for `%s` not found" % function_name) + + +def get_export_names(): + """Return a list of the currently supported export targets + + WARNING: API WILL CHANGE IN FUTURE RELEASES OF NBCONVERT""" + return sorted(exporter_map.keys()) + +# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*- +# +# Copyright 2002 Ben Escoto +# Copyright 2007 Kenneth Loafman +# +# This file is part of duplicity. 
+# +# Duplicity is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by the +# Free Software Foundation; either version 2 of the License, or (at your +# option) any later version. +# +# Duplicity is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with duplicity; if not, write to the Free Software Foundation, +# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + +from future_builtins import filter, map + +import re #@UnusedImport +import types +import os +import tempfile + +from duplicity import tarfile #@UnusedImport +from duplicity import librsync #@UnusedImport +from duplicity import log #@UnusedImport +from duplicity import diffdir +from duplicity import selection +from duplicity import tempdir +from duplicity import util #@UnusedImport +from duplicity.path import * #@UnusedWildImport +from duplicity.lazy import * #@UnusedWildImport + +"""Functions for patching of directories""" + +class PatchDirException( Exception ): + pass + + +def Patch( base_path, difftar_fileobj ): + """Patch given base_path and file object containing delta""" + diff_tarfile = tarfile.TarFile( "arbitrary", "r", difftar_fileobj ) + patch_diff_tarfile( base_path, diff_tarfile ) + assert not difftar_fileobj.close() + +def Patch_from_iter( base_path, fileobj_iter, restrict_index=() ): + """Patch given base_path and iterator of delta file objects""" + diff_tarfile = TarFile_FromFileobjs( fileobj_iter ) + patch_diff_tarfile( base_path, diff_tarfile, restrict_index ) + +def patch_diff_tarfile( base_path, diff_tarfile, restrict_index=() ): + """Patch given Path object using delta tarfile (as in tarfile.TarFile) + + If restrict_index is set, ignore any deltas in diff_tarfile that + don't start with restrict_index. + + """ + if base_path.exists(): + path_iter = selection.Select( base_path ).set_iter() + else: + path_iter = empty_iter() # probably untarring full backup + + diff_path_iter = difftar2path_iter( diff_tarfile ) + if restrict_index: + diff_path_iter = filter_path_iter( diff_path_iter, restrict_index ) + collated = diffdir.collate2iters( path_iter, diff_path_iter ) + + ITR = IterTreeReducer( PathPatcher, [base_path] ) + for basis_path, diff_ropath in collated: + if basis_path: + log.Info(_("Patching %s") % (util.ufn(basis_path.get_relative_path())), + log.InfoCode.patch_file_patching, + util.escape( basis_path.get_relative_path() ) ) + ITR( basis_path.index, basis_path, diff_ropath ) + else: + log.Info(_("Patching %s") % (util.ufn(diff_ropath.get_relative_path())), + log.InfoCode.patch_file_patching, + util.escape( diff_ropath.get_relative_path() ) ) + ITR( diff_ropath.index, basis_path, diff_ropath ) + ITR.Finish() + base_path.setdata() + +def empty_iter(): + if 0: + yield 1 # this never happens, but fools into generator treatment + +def filter_path_iter( path_iter, index ): + """Rewrite path elements of path_iter so they start with index + + Discard any that doesn't start with index, and remove the index + prefix from the rest. 
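+
+    A sketch of the effect, with a hypothetical index:
+
+        index == ('usr', 'local')
+          a path with index ('usr', 'local', 'bin') is yielded as ('bin',)
+          a path with index ('etc', 'passwd') is discarded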
+ + """ + assert isinstance( index, tuple ) and index, index + l = len( index ) + for path in path_iter: + if path.index[:l] == index: + path.index = path.index[l:] + yield path + +def difftar2path_iter( diff_tarfile ): + """Turn file-like difftarobj into iterator of ROPaths""" + tar_iter = iter( diff_tarfile ) + multivol_fileobj = None + + # The next tar_info is stored in this one element list so + # Multivol_Filelike below can update it. Any StopIterations will + # be passed upwards. + tarinfo_list = [tar_iter.next()] + + while 1: + # This section relevant when a multivol diff is last in tar + if not tarinfo_list[0]: + raise StopIteration + if multivol_fileobj and not multivol_fileobj.at_end: + multivol_fileobj.close() # aborting in middle of multivol + continue + + index, difftype, multivol = get_index_from_tarinfo( tarinfo_list[0] ) + ropath = ROPath( index ) + ropath.init_from_tarinfo( tarinfo_list[0] ) + ropath.difftype = difftype + if difftype == "deleted": + ropath.type = None + elif ropath.isreg(): + if multivol: + multivol_fileobj = Multivol_Filelike( diff_tarfile, tar_iter, + tarinfo_list, index ) + ropath.setfileobj( multivol_fileobj ) + yield ropath + continue # Multivol_Filelike will reset tarinfo_list + else: + ropath.setfileobj( diff_tarfile.extractfile( tarinfo_list[0] ) ) + yield ropath + tarinfo_list[0] = tar_iter.next() + +def get_index_from_tarinfo( tarinfo ): + """Return (index, difftype, multivol) pair from tarinfo object""" + for prefix in ["snapshot/", "diff/", "deleted/", + "multivol_diff/", "multivol_snapshot/"]: + tiname = util.get_tarinfo_name( tarinfo ) + if tiname.startswith( prefix ): + name = tiname[len( prefix ):] # strip prefix + if prefix.startswith( "multivol" ): + if prefix == "multivol_diff/": + difftype = "diff" + else: + difftype = "snapshot" + multivol = 1 + name, num_subs = \ + re.subn( "(?s)^multivol_(diff|snapshot)/?(.*)/[0-9]+$", + "\\2", tiname ) + if num_subs != 1: + raise PatchDirException(u"Unrecognized diff entry %s" % + util.ufn(tiname)) + else: + difftype = prefix[:-1] # strip trailing / + name = tiname[len( prefix ):] + if name.endswith( "/" ): + name = name[:-1] # strip trailing /'s + multivol = 0 + break + else: + raise PatchDirException(u"Unrecognized diff entry %s" % + util.ufn(tiname)) + if name == "." or name == "": + index = () + else: + index = tuple( name.split( "/" ) ) + if '..' in index: + raise PatchDirException(u"Tar entry %s contains '..'. Security " + "violation" % util.ufn(tiname)) + return ( index, difftype, multivol ) + + +class Multivol_Filelike: + """Emulate a file like object from multivols + + Maintains a buffer about the size of a volume. When it is read() + to the end, pull in more volumes as desired. + + """ + def __init__( self, tf, tar_iter, tarinfo_list, index ): + """Initializer. 
tf is TarFile obj, tarinfo is first tarinfo""" + self.tf, self.tar_iter = tf, tar_iter + self.tarinfo_list = tarinfo_list # must store as list for write access + self.index = index + self.buffer = "" + self.at_end = 0 + + def read( self, length= -1 ): + """Read length bytes from file""" + if length < 0: + while self.addtobuffer(): + pass + real_len = len( self.buffer ) + else: + while len( self.buffer ) < length: + if not self.addtobuffer(): + break + real_len = min( len( self.buffer ), length ) + + result = self.buffer[:real_len] + self.buffer = self.buffer[real_len:] + return result + + def addtobuffer( self ): + """Add next chunk to buffer""" + if self.at_end: + return None + index, difftype, multivol = get_index_from_tarinfo( #@UnusedVariable + self.tarinfo_list[0] ) + if not multivol or index != self.index: + # we've moved on + # the following communicates next tarinfo to difftar2path_iter + self.at_end = 1 + return None + + fp = self.tf.extractfile( self.tarinfo_list[0] ) + self.buffer += fp.read() + fp.close() + + try: + self.tarinfo_list[0] = self.tar_iter.next() + except StopIteration: + self.tarinfo_list[0] = None + self.at_end = 1 + return None + return 1 + + def close( self ): + """If not at end, read remaining data""" + if not self.at_end: + while 1: + self.buffer = "" + if not self.addtobuffer(): + break + self.at_end = 1 + + +class PathPatcher( ITRBranch ): + """Used by DirPatch, process the given basis and diff""" + def __init__( self, base_path ): + """Set base_path, Path of root of tree""" + self.base_path = base_path + self.dir_diff_ropath = None + + def start_process( self, index, basis_path, diff_ropath ): + """Start processing when diff_ropath is a directory""" + if not ( diff_ropath and diff_ropath.isdir() ): + assert index == (), util.uindex(index) # should only happen for first elem + self.fast_process( index, basis_path, diff_ropath ) + return + + if not basis_path: + basis_path = self.base_path.new_index( index ) + assert not basis_path.exists() + basis_path.mkdir() # Need place for later files to go into + elif not basis_path.isdir(): + basis_path.delete() + basis_path.mkdir() + self.dir_basis_path = basis_path + self.dir_diff_ropath = diff_ropath + + def end_process( self ): + """Copy directory permissions when leaving tree""" + if self.dir_diff_ropath: + self.dir_diff_ropath.copy_attribs( self.dir_basis_path ) + + def can_fast_process( self, index, basis_path, diff_ropath ): + """No need to recurse if diff_ropath isn't a directory""" + return not ( diff_ropath and diff_ropath.isdir() ) + + def fast_process( self, index, basis_path, diff_ropath ): + """For use when neither is a directory""" + if not diff_ropath: + return # no change + elif not basis_path: + if diff_ropath.difftype == "deleted": + pass # already deleted + else: + # just copy snapshot over + diff_ropath.copy( self.base_path.new_index( index ) ) + elif diff_ropath.difftype == "deleted": + if basis_path.isdir(): + basis_path.deltree() + else: + basis_path.delete() + elif not basis_path.isreg(): + if basis_path.isdir(): + basis_path.deltree() + else: + basis_path.delete() + diff_ropath.copy( basis_path ) + else: + assert diff_ropath.difftype == "diff", diff_ropath.difftype + basis_path.patch_with_attribs( diff_ropath ) + + +class TarFile_FromFileobjs: + """Like a tarfile.TarFile iterator, but read from multiple fileobjs""" + def __init__( self, fileobj_iter ): + """Make new tarinfo iterator + + fileobj_iter should be an iterator of file objects opened for + reading. 
They will be closed at end of reading. + + """ + self.fileobj_iter = fileobj_iter + self.tarfile, self.tar_iter = None, None + self.current_fp = None + + def __iter__( self ): + return self + + def set_tarfile( self ): + """Set tarfile from next file object, or raise StopIteration""" + if self.current_fp: + assert not self.current_fp.close() + self.current_fp = self.fileobj_iter.next() + self.tarfile = util.make_tarfile("r", self.current_fp) + self.tar_iter = iter( self.tarfile ) + + def next( self ): + if not self.tarfile: + self.set_tarfile() + try: + return self.tar_iter.next() + except StopIteration: + assert not self.tarfile.close() + self.set_tarfile() + return self.tar_iter.next() + + def extractfile( self, tarinfo ): + """Return data associated with given tarinfo""" + return self.tarfile.extractfile( tarinfo ) + + +def collate_iters( iter_list ): + """Collate iterators by index + + Input is a list of n iterators each of which must iterate elements + with an index attribute. The elements must come out in increasing + order, and the index should be a tuple itself. + + The output is an iterator which yields tuples where all elements + in the tuple have the same index, and the tuple has n elements in + it. If any iterator lacks an element with that index, the tuple + will have None in that spot. + + """ + # overflow[i] means that iter_list[i] has been exhausted + # elems[i] is None means that it is time to replenish it. + iter_num = len( iter_list ) + if iter_num == 2: + return diffdir.collate2iters( iter_list[0], iter_list[1] ) + overflow = [None] * iter_num + elems = overflow[:] + + def setrorps( overflow, elems ): + """Set the overflow and rorps list""" + for i in range( iter_num ): + if not overflow[i] and elems[i] is None: + try: + elems[i] = iter_list[i].next() + except StopIteration: + overflow[i] = 1 + elems[i] = None + + def getleastindex( elems ): + """Return the first index in elems, assuming elems isn't empty""" + return min( map( lambda elem: elem.index, filter( lambda x: x, elems ) ) ) + + def yield_tuples( iter_num, overflow, elems ): + while 1: + setrorps( overflow, elems ) + if None not in overflow: + break + + index = getleastindex( elems ) + yieldval = [] + for i in range( iter_num ): + if elems[i] and elems[i].index == index: + yieldval.append( elems[i] ) + elems[i] = None + else: + yieldval.append( None ) + yield tuple( yieldval ) + return yield_tuples( iter_num, overflow, elems ) + +class IndexedTuple: + """Like a tuple, but has .index (used previously by collate_iters)""" + def __init__( self, index, sequence ): + self.index = index + self.data = tuple( sequence ) + + def __len__( self ): + return len( self.data ) + + def __getitem__( self, key ): + """This only works for numerical keys (easier this way)""" + return self.data[key] + + def __lt__( self, other ): + return self.__cmp__( other ) == -1 + def __le__( self, other ): + return self.__cmp__( other ) != 1 + def __ne__( self, other ): + return not self.__eq__( other ) + def __gt__( self, other ): + return self.__cmp__( other ) == 1 + def __ge__( self, other ): + return self.__cmp__( other ) != -1 + + def __cmp__( self, other ): + assert isinstance( other, IndexedTuple ) + if self.index < other.index: + return - 1 + elif self.index == other.index: + return 0 + else: + return 1 + + def __eq__( self, other ): + if isinstance( other, IndexedTuple ): + return self.index == other.index and self.data == other.data + elif type( other ) is types.TupleType: + return self.data == other + else: + return None + + def 
__str__( self ): + return "(%s).%s" % ( ", ".join( map( str, self.data ) ), self.index ) + +def normalize_ps( patch_sequence ): + """Given an sequence of ROPath deltas, remove blank and unnecessary + + The sequence is assumed to be in patch order (later patches apply + to earlier ones). A patch is unnecessary if a later one doesn't + require it (for instance, any patches before a "delete" are + unnecessary). + + """ + result_list = [] + i = len( patch_sequence ) - 1 + while i >= 0: + delta = patch_sequence[i] + if delta is not None: + # skip blank entries + result_list.insert( 0, delta ) + if delta.difftype != "diff": + break + i -= 1 + return result_list + +def patch_seq2ropath( patch_seq ): + """Apply the patches in patch_seq, return single ropath""" + first = patch_seq[0] + assert first.difftype != "diff", "First patch in sequence " \ + "%s was a diff" % patch_seq + if not first.isreg(): + # No need to bother with data if not regular file + assert len(patch_seq) == 1, "Patch sequence isn't regular, but " \ + "has %d entries" % len(patch_seq) + return first.get_ropath() + + current_file = first.open( "rb" ) + + for delta_ropath in patch_seq[1:]: + assert delta_ropath.difftype == "diff", delta_ropath.difftype + if not isinstance( current_file, file ): + """ + librsync insists on a real file object, which we create manually + by using the duplicity.tempdir to tell us where. + """ + tempfp = tempfile.TemporaryFile( dir=tempdir.default().dir() ) + util.copyfileobj( current_file, tempfp ) + assert not current_file.close() + tempfp.seek( 0 ) + current_file = tempfp + current_file = librsync.PatchedFile( current_file, + delta_ropath.open( "rb" ) ) + result = patch_seq[-1].get_ropath() + result.setfileobj( current_file ) + return result + +def integrate_patch_iters( iter_list ): + """Combine a list of iterators of ropath patches + + The iter_list should be sorted in patch order, and the elements in + each iter_list need to be orderd by index. The output will be an + iterator of the final ROPaths in index order. + + """ + collated = collate_iters( iter_list ) + for patch_seq in collated: + normalized = normalize_ps(patch_seq) + try: + final_ropath = patch_seq2ropath(normalized) + if final_ropath.exists(): + # otherwise final patch was delete + yield final_ropath + except Exception as e: + filename = normalized[-1].get_ropath().get_relative_path() + log.Warn(_("Error '%s' patching %s") % + (util.uexc(e), util.ufn(filename)), + log.WarningCode.cannot_process, + util.escape(filename)) + +def tarfiles2rop_iter( tarfile_list, restrict_index=() ): + """Integrate tarfiles of diffs into single ROPath iter + + Then filter out all the diffs in that index which don't start with + the restrict_index. + + """ + diff_iters = [difftar2path_iter(x) for x in tarfile_list] + if restrict_index: + # Apply filter before integration + diff_iters = [filter_path_iter(x, restrict_index) for x in diff_iters] + return integrate_patch_iters( diff_iters ) + +def Write_ROPaths( base_path, rop_iter ): + """Write out ropaths in rop_iter starting at base_path + + Returns 1 if something was actually written, 0 otherwise. + + """ + ITR = IterTreeReducer( ROPath_IterWriter, [base_path] ) + return_val = 0 + for ropath in rop_iter: + return_val = 1 + ITR( ropath.index, ropath ) + ITR.Finish() + base_path.setdata() + return return_val + +class ROPath_IterWriter( ITRBranch ): + """Used in Write_ROPaths above + + We need to use an ITR because we have to update the + permissions/times of directories after we write the files in them. 
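+    In practice start_process() prepares the directory for the given index
+    (handling a non-directory base as a special case), fast_process() writes
+    regular files and other non-directories, and end_process() copies the
+    directory's attributes once its contents have been written.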
+ + """ + def __init__( self, base_path ): + """Set base_path, Path of root of tree""" + self.base_path = base_path + self.dir_diff_ropath = None + self.dir_new_path = None + + def start_process( self, index, ropath ): + """Write ropath. Only handles the directory case""" + if not ropath.isdir(): + # Base may not be a directory, but rest should + assert ropath.index == (), ropath.index + new_path = self.base_path.new_index( index ) + if ropath.exists(): + if new_path.exists(): + new_path.deltree() + ropath.copy( new_path ) + + self.dir_new_path = self.base_path.new_index( index ) + if self.dir_new_path.exists() and not globals.force: + # base may exist, but nothing else + assert index == (), index + else: + self.dir_new_path.mkdir() + self.dir_diff_ropath = ropath + + def end_process( self ): + """Update information of a directory when leaving it""" + if self.dir_diff_ropath: + self.dir_diff_ropath.copy_attribs( self.dir_new_path ) + + def can_fast_process( self, index, ropath ): + """Can fast process (no recursion) if ropath isn't a directory""" + log.Info( _( "Writing %s of type %s" ) % + (util.ufn(ropath.get_relative_path()), ropath.type), + log.InfoCode.patch_file_writing, + "%s %s" % ( util.escape( ropath.get_relative_path() ), ropath.type ) ) + return not ropath.isdir() + + def fast_process( self, index, ropath ): + """Write non-directory ropath to destination""" + if ropath.exists(): + ropath.copy( self.base_path.new_index( index ) ) + +# +# FRETBursts - A single-molecule FRET burst analysis toolkit. +# +# Copyright (C) 2014 Antonino Ingargiola +# +""" +Module containing automated unit tests for FRETBursts. + +Running the tests requires `py.test`. +""" + +from __future__ import division +from builtins import range, zip + +from collections import namedtuple +import pytest +import numpy as np + +try: + import matplotlib +except ImportError: + has_matplotlib = False # OK to run tests without matplotlib +else: + has_matplotlib = True + matplotlib.use('Agg') # but if matplotlib is installed, use Agg + +try: + import numba +except ImportError: + has_numba = False +else: + has_numba = True + + +import fretbursts.background as bg +import fretbursts.burstlib as bl +import fretbursts.burstlib_ext as bext +from fretbursts import loader +from fretbursts import select_bursts +from fretbursts.ph_sel import Ph_sel +from fretbursts.phtools import phrates +if has_matplotlib: + import fretbursts.burst_plot as bplt + + +# data subdir in the notebook folder +DATASETS_DIR = u'notebooks/data/' + + +def _alex_process(d): + loader.alex_apply_period(d) + d.calc_bg(bg.exp_fit, time_s=30, tail_min_us=300) + d.burst_search(L=10, m=10, F=7) + +def load_dataset_1ch(process=True): + fn = "0023uLRpitc_NTP_20dT_0.5GndCl.hdf5" + fname = DATASETS_DIR + fn + d = loader.photon_hdf5(fname) + if process: + _alex_process(d) + return d + +def load_dataset_8ch(): + fn = "12d_New_30p_320mW_steer_3.hdf5" + fname = DATASETS_DIR + fn + d = loader.photon_hdf5(fname) + d.calc_bg(bg.exp_fit, time_s=30, tail_min_us=300) + d.burst_search(L=10, m=10, F=7) + return d + +def load_fake_pax(): + fn = "0023uLRpitc_NTP_20dT_0.5GndCl.hdf5" + fname = DATASETS_DIR + fn + d = loader.photon_hdf5(fname) + d.add(ALEX=False, meas_type='PAX') + loader.alex_apply_period(d) + d.calc_bg(bg.exp_fit, time_s=30, tail_min_us='auto') + d.burst_search(L=10, m=10, F=6) + return d + + +@pytest.fixture(scope="module", params=[ + load_dataset_1ch, + load_dataset_8ch, + ]) +def data(request): + load_func = request.param + d = load_func() + return d + + 
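+# Illustrative sketch (not part of the original suite): the parametrized,
+# module-scoped `data` fixture above runs each test once per dataset loader
+# and reuses the processed Data object across the module. The hypothetical
+# example below only touches attributes already exercised elsewhere in this
+# file (nch, mburst, num_bursts).
+def test_data_fixture_smoke_example(data):
+    """Hypothetical example: every loaded dataset has per-channel bursts."""
+    assert data.nch >= 1
+    assert len(data.mburst) == data.nch
+    assert len(data.num_bursts) == data.nch
+
+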
+@pytest.fixture(scope="module") +def data_8ch(request): + d = load_dataset_8ch() + return d + +@pytest.fixture(scope="module") +def data_1ch(request): + d = load_dataset_1ch() + return d + + +## +# List comparison functions +# + +def list_equal(list1, list2): + """Test numerical equality of all the elements in the two lists. + """ + return np.all([val1 == val2 for val1, val2 in zip(list1, list2)]) + +def list_array_equal(list1, list2): + """Test numerical equality between two lists of arrays. + """ + return np.all([np.all(arr1 == arr2) for arr1, arr2 in zip(list1, list2)]) + +def list_array_allclose(list1, list2): + """Test float closeness (np.allclose) between two lists of arrays. + """ + return np.all([np.allclose(arr1, arr2) for arr1, arr2 in zip(list1, list2)]) + +## +# Test functions +# + +def test_bg_compatlayer_for_obsolete_attrs(): + d = load_dataset_1ch(process=False) + attrs = ('bg_dd', 'bg_ad', 'bg_da', 'bg_aa', + 'rate_m', 'rate_dd', 'rate_ad', 'rate_da', 'rate_aa') + for attr in attrs: + with pytest.raises(RuntimeError): + getattr(d, attr) + _alex_process(d) + for attr in attrs: + assert isinstance(getattr(d, attr), list) + + +def test_ph_times_compact(data_1ch): + """Test calculation of ph_times_compact.""" + def isinteger(x): + return np.equal(np.mod(x, 1), 0) + ich = 0 + d = data_1ch + + ph_d = d.get_ph_times(ph_sel=Ph_sel(Dex='DAem')) + ph_a = d.get_ph_times(ph_sel=Ph_sel(Aex='DAem')) + ph_dc = d.get_ph_times(ph_sel=Ph_sel(Dex='DAem'), compact=True) + ph_ac = d.get_ph_times(ph_sel=Ph_sel(Aex='DAem'), compact=True) + # Test that the difference of ph and ph_compact is multiple of + # the complementary excitation period duration + Dex_void = bl._excitation_width(d._D_ON_multich[ich], d.alex_period) + Aex_void = bl._excitation_width(d._A_ON_multich[ich], d.alex_period) + assert isinteger((ph_d - ph_dc) / Dex_void).all() + assert isinteger((ph_a - ph_ac) / Aex_void).all() + # Test that alternation histogram does not have "gaps" for ph_compact + bins = np.linspace(0, d.alex_period, num=101) + hist_dc, _ = np.histogram(ph_dc % d.alex_period, bins=bins) + hist_ac, _ = np.histogram(ph_ac % d.alex_period, bins=bins) + assert (hist_dc > 0).all() + assert (hist_ac > 0).all() + + +def test_time_min_max(): + """Test time_min and time_max for ALEX data.""" + d = load_dataset_1ch(process=False) + ich = 0 + assert d.time_max == d.ph_times_t[ich].max() * d.clk_p + assert d.time_min == d.ph_times_t[ich].min() * d.clk_p + del d._time_max, d._time_min + _alex_process(d) + assert d.time_max == d.ph_times_m[ich][-1] * d.clk_p + assert d.time_min == d.ph_times_m[ich][0] * d.clk_p + d.delete('ph_times_m') + del d._time_max, d._time_min + assert d.time_max == d.mburst[0].stop[-1] * d.clk_p + assert d.time_min == d.mburst[0].start[0] * d.clk_p + + +def test_time_min_max_multispot(data_8ch): + """Test time_min and time_max for multi-spot data.""" + d = data_8ch + assert d.time_max == max(t[-1] for t in d.ph_times_m) * d.clk_p + assert d.time_min == min(t[0] for t in d.ph_times_m) * d.clk_p + + +def test_aex_dex_ratio(data_1ch): + """Test methods computing relative D and A alternation periods durations. 
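+
+    Purely illustrative numbers (not the values in the test data): with
+    D_ON = (0, 600) and A_ON = (600, 1000) in arbitrary units, the
+    A-excitation fraction is 400 / (600 + 400) = 0.4, the A/D duration
+    ratio is 400 / 600 ~= 0.667, and consistently 0.4 / (1 - 0.4) ~= 0.667.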
+ """ + d = data_1ch + Dx, Ax = d.D_ON, d.A_ON + a1 = d._aex_fraction() + a2 = (Ax[1] - Ax[0]) / (Ax[1] - Ax[0] + Dx[1] - Dx[0]) + assert a1 == a2 + r1 = d._aex_dex_ratio() + r2 = (Ax[1] - Ax[0]) / (Dx[1] - Dx[0]) + assert r1 == r2 + assert (a1 / (1 - a1)) == r1 + + +def test_burst_size_pax(): + d = load_fake_pax() + aex_dex_ratio, alpha_d = d._aex_dex_ratio(), 1 - d._aex_fraction() + nd, na = d.nd[0], d.na[0] + nda = d.nda[0] + naa = d.naa[0] - d.nar[0] * aex_dex_ratio + + # Test burst size during Dex + b1 = d.burst_sizes_pax_ich(add_aex=False) + b2 = d.burst_sizes_ich(add_naa=False) + b3 = nd + na + assert (b1 == b2).all() + assert (b1 == b3).all() + + # Test naa + naa2 = d.get_naa_corrected() + naa3 = d._get_naa_ich() + assert (naa == naa2).all() + assert (naa == naa3).all() + + # Test add_naa + b1 = d.burst_sizes_ich(add_naa=True) + b2 = nd + na + naa + assert (b1 == b2).all() + + # Test add_aex with no duty-cycle correction + b1 = d.burst_sizes_pax_ich(add_aex=True, aex_corr=False) + b2 = nd + na + nda + d.naa[0] + b3 = nd + na + nda + naa + na * aex_dex_ratio + assert np.allclose(b1, b2) + assert np.allclose(b1, b3) + + # Test add_aex with duty-cycle correction + b1 = d.burst_sizes_pax_ich(add_aex=True, aex_corr=True) + b2 = nd + na + nda + na * aex_dex_ratio + naa / alpha_d + assert np.allclose(b1, b2) + + # Test add_aex with duty-cycle correction, donor_ref + b1 = d.burst_sizes_pax_ich(add_aex=True, aex_corr=True, donor_ref=True) + b2 = d.burst_sizes_pax_ich(add_aex=True, aex_corr=True, donor_ref=False) + assert np.allclose(b1, b2) + + # Test add_aex with duty-cycle correction, gamma, beta + gamma = 0.7 + beta = 0.85 + b1 = d.burst_sizes_pax_ich(add_aex=True, aex_corr=True, + gamma=gamma, beta=beta, donor_ref=True) + b2 = d.burst_sizes_pax_ich(add_aex=True, aex_corr=True, + gamma=gamma, beta=beta, donor_ref=False) + assert np.allclose(b1 * gamma, b2) + b1 = d.burst_sizes_pax_ich(add_aex=True, aex_corr=True, + gamma=gamma, beta=beta, donor_ref=False) + + b2 = (gamma * (nd + nda) + na * (1 + aex_dex_ratio) + + naa / (alpha_d * beta)) + assert np.allclose(b1, b2) + + d.leakage = 0.1 + nd, na = d.nd[0], d.na[0] + nda = d.nda[0] + naa = d.naa[0] - d.nar[0] * aex_dex_ratio + + # Test add_aex with duty-cycle correction, gamma, beta + gamma = 0.7 + beta = 0.85 + b1 = d.burst_sizes_pax_ich(add_aex=True, aex_corr=True, + gamma=gamma, beta=beta, donor_ref=True) + b2 = d.burst_sizes_pax_ich(add_aex=True, aex_corr=True, + gamma=gamma, beta=beta, donor_ref=False) + assert np.allclose(b1 * gamma, b2) + b1 = d.burst_sizes_pax_ich(add_aex=True, aex_corr=True, + gamma=gamma, beta=beta, donor_ref=False) + + b2 = (gamma * (nd + nda) + na * (1 + aex_dex_ratio) + + naa / (alpha_d * beta)) + assert np.allclose(b1, b2) + + +def test_bg_calc(data): + """Smoke test bg_calc() and test deletion of bg fields. 
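+
+    A numeric tail_min_us stores the user threshold field (bg_th_us_user),
+    while tail_min_us='auto' stores the automatic-threshold fields
+    (bg_auto_th_us0, bg_auto_F_bg); the two sets are mutually exclusive.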
+ """ + data.calc_bg(bg.exp_fit, time_s=30, tail_min_us=300) + assert 'bg_auto_th_us0' not in data + assert 'bg_auto_F_bg' not in data + assert 'bg_th_us_user' in data + + data.calc_bg(bg.exp_fit, time_s=30, tail_min_us='auto', F_bg=1.7) + assert 'bg_auto_th_us0' in data + assert 'bg_auto_F_bg' in data + assert 'bg_th_us_user' not in data + + data.calc_bg(bg.exp_fit, time_s=30, tail_min_us='auto', F_bg=1.7, + fit_allph=False) + streams = [s for s in data.ph_streams if s != Ph_sel('all')] + bg_t = [np.sum(data.bg[s][ich] for s in streams) for ich in range(data.nch)] + assert list_array_equal(data.bg[Ph_sel('all')], bg_t) + + +def test_ph_streams(data): + sel = [Ph_sel('all'), Ph_sel(Dex='Dem'), Ph_sel(Dex='Aem')] + if data.alternated: + sel.extend([Ph_sel(Aex='Aem'), Ph_sel(Aex='Dem')]) + for s in sel: + assert s in data.ph_streams + + +def test_bg_from(data): + """Test the method .bg_from() for all the ph_sel combinations. + """ + d = data + for sel in d.ph_streams: + bg = d.bg_from(ph_sel=sel) + assert list_array_equal(bg, d.bg[sel]) + + if not (data.alternated): + assert list_array_equal(d.bg_from(Ph_sel('all')), + d.bg_from(Ph_sel(Dex='DAem'))) + return + + bg_dd = d.bg_from(ph_sel=Ph_sel(Dex='Dem')) + bg_ad = d.bg_from(ph_sel=Ph_sel(Dex='Aem')) + + bg = d.bg_from(ph_sel=Ph_sel(Dex='DAem')) + assert list_array_equal(bg, [b1 + b2 for b1, b2 in zip(bg_dd, bg_ad)]) + + bg_aa = d.bg_from(ph_sel=Ph_sel(Aex='Aem')) + bg_da = d.bg_from(ph_sel=Ph_sel(Aex='Dem')) + + bg = d.bg_from(ph_sel=Ph_sel(Aex='DAem')) + assert list_array_equal(bg, [b1 + b2 for b1, b2 in zip(bg_aa, bg_da)]) + + bg = d.bg_from(ph_sel=Ph_sel(Dex='Dem', Aex='Dem')) + assert list_array_equal(bg, [b1 + b2 for b1, b2 in zip(bg_dd, bg_da)]) + + bg = d.bg_from(ph_sel=Ph_sel(Dex='Aem', Aex='Aem')) + assert list_array_equal(bg, [b1 + b2 for b1, b2 in zip(bg_ad, bg_aa)]) + + bg = d.bg_from(ph_sel=Ph_sel(Dex='DAem')) + assert list_array_equal(bg, [b1 + b2 for b1, b2 in zip(bg_dd, bg_ad)]) + + bg = d.bg_from(ph_sel=Ph_sel(Dex='DAem', Aex='Aem')) + bg2 = [b1 + b2 + b3 for b1, b2, b3 in zip(bg_dd, bg_ad, bg_aa)] + assert list_array_equal(bg, bg2) + + +def test_iter_ph_times(data): + """Test method .iter_ph_times() for all the ph_sel combinations. 
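+
+    In the assertions below, D_ex/A_ex are boolean masks for the donor and
+    acceptor excitation periods and D_em/A_em for the donor and acceptor
+    emission channels, so Ph_sel(Dex='Dem') corresponds to D_em * D_ex on
+    alternated data (and to ~A_em otherwise).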
+ """ + # TODO add all the ph_sel combinations like in test_bg_from() + d = data + + assert list_array_equal(d.ph_times_m, d.iter_ph_times()) + + for ich, ph in enumerate(d.iter_ph_times(Ph_sel(Dex='Dem'))): + if d.alternated: + assert (ph == d.ph_times_m[ich][d.D_em[ich] * d.D_ex[ich]]).all() + else: + assert (ph == d.ph_times_m[ich][~d.A_em[ich]]).all() + + for ich, ph in enumerate(d.iter_ph_times(Ph_sel(Dex='Aem'))): + if d.alternated: + assert (ph == d.ph_times_m[ich][d.A_em[ich] * d.D_ex[ich]]).all() + else: + assert (ph == d.ph_times_m[ich][d.A_em[ich]]).all() + + if d.alternated: + for ich, ph in enumerate(d.iter_ph_times(Ph_sel(Aex='Dem'))): + assert (ph == d.ph_times_m[ich][d.D_em[ich] * d.A_ex[ich]]).all() + for ich, ph in enumerate(d.iter_ph_times(Ph_sel(Aex='Aem'))): + assert (ph == d.ph_times_m[ich][d.A_em[ich] * d.A_ex[ich]]).all() + + for ich, ph in enumerate(d.iter_ph_times(Ph_sel(Dex='DAem'))): + assert (ph == d.ph_times_m[ich][d.D_ex[ich]]).all() + for ich, ph in enumerate(d.iter_ph_times(Ph_sel(Aex='DAem'))): + assert (ph == d.ph_times_m[ich][d.A_ex[ich]]).all() + + for ich, ph in enumerate(d.iter_ph_times(Ph_sel(Dex='Dem', Aex='Dem'))): + assert (ph == d.ph_times_m[ich][d.D_em[ich]]).all() + for ich, ph in enumerate(d.iter_ph_times(Ph_sel(Dex='Aem', Aex='Aem'))): + assert (ph == d.ph_times_m[ich][d.A_em[ich]]).all() + + for ich, ph in enumerate(d.iter_ph_times( + Ph_sel(Dex='DAem', Aex='Aem'))): + mask = d.D_ex[ich] + d.A_em[ich] * d.A_ex[ich] + assert (ph == d.ph_times_m[ich][mask]).all() + else: + assert list_array_equal(d.iter_ph_times(), + d.iter_ph_times(Ph_sel(Dex='DAem'))) + + +def test_get_ph_times_period(data): + for ich in range(data.nch): + data.get_ph_times_period(0, ich=ich) + data.get_ph_times_period(0, ich=ich, ph_sel=Ph_sel(Dex='Dem')) + + +def test_iter_ph_times_period(data): + d = data + for ich in range(data.nch): + for period, ph_period in enumerate(d.iter_ph_times_period(ich=ich)): + istart, iend = d.Lim[ich][period] + assert (ph_period == d.ph_times_m[ich][istart : iend + 1]).all() + + ph_sel = Ph_sel(Dex='Dem') + mask = d.get_ph_mask(ich=ich, ph_sel=ph_sel) + for period, ph_period in enumerate( + d.iter_ph_times_period(ich=ich, ph_sel=ph_sel)): + istart, iend = d.Lim[ich][period] + ph_period_test = d.ph_times_m[ich][istart : iend + 1] + ph_period_test = ph_period_test[mask[istart : iend + 1]] + assert (ph_period == ph_period_test).all() + + +def test_burst_search_py_cy(data): + """Test python and cython burst search with background-dependent threshold. 
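+
+    The pure_python flag switches between the pure-Python and Cython
+    burst-search implementations; both code paths must yield identical
+    num_bursts arrays and identical Bursts objects (mburst).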
+ """ + data.burst_search(pure_python=True) + mburst1 = [b.copy() for b in data.mburst] + num_bursts1 = data.num_bursts + data.burst_search(pure_python=False) + assert np.all(num_bursts1 == data.num_bursts) + assert mburst1 == data.mburst + data.burst_search(L=30, pure_python=True) + mburst1 = [b.copy() for b in data.mburst] + num_bursts1 = data.num_bursts + data.burst_search(L=30, pure_python=False) + assert np.all(num_bursts1 == data.num_bursts) + assert mburst1 == data.mburst + + +def test_burst_search_constant_rates(data): + """Test python and cython burst search with constant threshold.""" + data.burst_search(min_rate_cps=50e3, pure_python=True) + assert (data.num_bursts > 0).all() + mburst1 = [b.copy() for b in data.mburst] + num_bursts1 = data.num_bursts + data.burst_search(min_rate_cps=50e3, pure_python=False) + assert (data.num_bursts > 0).all() + assert np.all(num_bursts1 == data.num_bursts) + assert mburst1 == data.mburst + + +def test_burst_search_L(data): + """Test burst search with different L arguments.""" + data.burst_search(L=10) + for bursts in data.mburst: + assert (bursts.counts >= 10).all() + num_bursts1 = data.num_bursts + data.burst_search(L=30) + for bursts in data.mburst: + assert (bursts.counts >= 30).all() + assert np.all(num_bursts1 > data.num_bursts) + + +def test_burst_search_with_no_bursts(data): + """Smoke test burst search when some periods have no bursts.""" + # F=600 results in periods with no bursts for the us-ALEX measurement + # and in no bursts at all for the multi-spot measurements + data.burst_search(m=10, F=600) + data.fuse_bursts(ms=1) + + +if has_matplotlib: + def test_stale_fitter_after_burst_search(data): + """Test that E/S_fitter attributes are deleted on burst search.""" + data.burst_search(L=10, m=10, F=7, ph_sel=Ph_sel(Dex='Dem')) + bplt.dplot(data, bplt.hist_fret) # create E_fitter attribute + if data.alternated: + bplt.dplot(data, bplt.hist_S) # create S_fitter attribute + + data.burst_search(L=10, m=10, F=7, ph_sel=Ph_sel(Dex='Aem')) + assert not hasattr(data, 'E_fitter') + if data.alternated: + assert not hasattr(data, 'S_fitter') + + bplt.dplot(data, bplt.hist_fret) # create E_fitter attribute + if data.alternated: + bplt.dplot(data, bplt.hist_S) # create S_fitter attribute + + data.calc_fret() + assert not hasattr(data, 'E_fitter') + if data.alternated: + assert not hasattr(data, 'S_fitter') + + +def test_burst_search(data): + """Smoke test and bg_bs check.""" + streams = [Ph_sel(Dex='Dem'), Ph_sel(Dex='Aem')] + if data.alternated: + streams.extend([Ph_sel(Dex='Aem', Aex='Aem'), Ph_sel(Dex='DAem')]) + for sel in streams: + data.burst_search(L=10, m=10, F=7, ph_sel=sel) + assert list_equal(data.bg_bs, data.bg_from(sel)) + + if data.alternated: + data.burst_search(m=10, F=7, ph_sel=Ph_sel(Dex='DAem'), compact=True) + data.burst_search(L=10, m=10, F=7) + + +def test_burst_search_and_gate(data_1ch): + """Test consistency of burst search and gate.""" + d = data_1ch + assert d.alternated + + # Smoke tests + bext.burst_search_and_gate(d, F=(6, 8)) + bext.burst_search_and_gate(d, m=(12, 8)) + bext.burst_search_and_gate(d, min_rate_cps=(60e3, 40e3)) + if d.nch > 1: + mr1 = 35e3 + np.arange(d.nch) * 1e3 + mr2 = 30e3 + np.arange(d.nch) * 1e3 + bext.burst_search_and_gate(d, min_rate_cps=(mr1, mr2)) + + # Consistency test + d_dex = d.copy() + d_dex.burst_search(ph_sel=Ph_sel(Dex='DAem')) + d_aex = d.copy() + d_aex.burst_search(ph_sel=Ph_sel(Aex='Aem')) + d_and = bext.burst_search_and_gate(d) + for bursts_dex, bursts_aex, bursts_and, ph in zip( + 
d_dex.mburst, d_aex.mburst, d_and.mburst, d.iter_ph_times()): + ph_b_mask_dex = bl.ph_in_bursts_mask(ph.size, bursts_dex) + ph_b_mask_aex = bl.ph_in_bursts_mask(ph.size, bursts_aex) + ph_b_mask_and = bl.ph_in_bursts_mask(ph.size, bursts_and) + assert (ph_b_mask_and == ph_b_mask_dex * ph_b_mask_aex).all() + + +def test_mch_count_ph_num_py_c(data): + na_py = bl.bslib.mch_count_ph_in_bursts_py(data.mburst, data.A_em) + na_c = bl.bslib.mch_count_ph_in_bursts_c(data.mburst, data.A_em) + assert list_array_equal(na_py, na_c) + assert na_py[0].dtype == np.float64 + + +def test_burst_sizes(data): + """Test for .burst_sizes_ich() and burst_sizes()""" + # Smoke test + plain_sizes = data.burst_sizes() + assert len(plain_sizes) == data.nch + # Test gamma and donor_ref arguments + bs1 = data.burst_sizes_ich(gamma=0.5, donor_ref=True) + bs2 = data.burst_sizes_ich(gamma=0.5, donor_ref=False) + assert np.allclose(bs1, bs2 / 0.5) + # Test add_naa + if data.alternated: + bs_no_naa = data.burst_sizes_ich(add_naa=False) + bs_naa = data.burst_sizes_ich(add_naa=True) + assert np.allclose(bs_no_naa + data.naa[0], bs_naa) + + # Test beta and donor_ref arguments with gamma=1 + naa1 = data.get_naa_corrected(beta=0.8, donor_ref=True) + naa2 = data.get_naa_corrected(beta=0.8, donor_ref=False) + assert np.allclose(naa1, naa2) + + # Test beta and donor_ref arguments with gamma=0.5 + naa1 = data.get_naa_corrected(gamma=0.5, beta=0.8, donor_ref=True) + naa2 = data.get_naa_corrected(gamma=0.5, beta=0.8, donor_ref=False) + assert np.allclose(naa1 * 0.5, naa2) + +def test_leakage(data): + """ + Test setting leakage before and after burst search + """ + # burst search, then set leakage + data.burst_search() + data.leakage = 0.04 + na1 = list(data.na) + # set leakage, then burst search + data.burst_search() + na2 = list(data.na) + assert list_array_equal(na1, na2) + + +def test_gamma(data): + """ + Test setting gamma before and after burst search + """ + # burst search, then set gamma + data.burst_search() + E0 = list(data.E) + data.gamma = 0.5 + E1 = list(data.E) + assert not list_array_equal(E0, E1) + # burst search after setting gamma + data.burst_search() + E2 = list(data.E) + assert list_array_equal(E1, E2) + + +def test_dir_ex(data_1ch): + """ + Test setting dir_ex before and after burst search + """ + data = data_1ch + # burst search, then set dir_ex + data.burst_search() + na0 = list(data.na) + data.dir_ex = 0.05 + na1 = list(data.na) + assert not list_array_equal(na0, na1) + # burst search after setting dir_ex + data.burst_search() + na2 = list(data.na) + assert list_array_equal(na1, na2) + + +def test_beta(data_1ch): + """ + Test setting beta before and after burst search + """ + data = data_1ch + # burst search, then set beta + data.burst_search() + S0 = list(data.S) + data.beta = 0.7 + S1 = list(data.S) + assert not list_array_equal(S0, S1) + # burst search after setting beta + data.burst_search() + S2 = list(data.S) + assert list_array_equal(S1, S2) + + +def test_bursts_interface(data): + d = data + for b in d.mburst: + assert (b.start == b.data[:, b._i_start]).all() + assert (b.stop == b.data[:, b._i_stop]).all() + assert (b.istart == b.data[:, b._i_istart]).all() + assert (b.istop == b.data[:, b._i_istop]).all() + + rate = 1.*b.counts/b.width + assert (b.ph_rate == rate).all() + + separation = b.start[1:] - b.stop[:-1] + assert (b.separation == separation).all() + + assert (b.stop > b.start).all() + + +def test_burst_stop_istop(data): + """Test coherence between b_end() and b_iend()""" + d = data + for ph, 
bursts in zip(d.ph_times_m, d.mburst): + assert (ph[bursts.istop] == bursts.stop).all() + + +def test_monotonic_burst_start(data): + """Test for monotonic burst start times.""" + d = data + for i in range(d.nch): + assert (np.diff(d.mburst[i].start) > 0).all() + + +def test_monotonic_burst_stop(data): + """Test for monotonic burst stop times.""" + d = data + for bursts in d.mburst: + assert (np.diff(bursts.stop) > 0).all() + + +def test_burst_istart_iend_size(data): + """Test consistency between burst istart, istop and counts (i.e. size)""" + d = data + for bursts in d.mburst: + counts = bursts.istop - bursts.istart + 1 + assert (counts == bursts.counts).all() + + +def test_burst_recompute_times(data): + """Test Bursts.recompute_times method.""" + d = data + for times, bursts in zip(d.ph_times_m, d.mburst): + newbursts = bursts.recompute_times(times) + assert newbursts == bursts + + +def test_burst_recompute_index(data): + """Test Bursts.recompute_index_* methods.""" + d = data + ph_sel = Ph_sel(Dex='Dem') + d.burst_search(ph_sel=ph_sel, index_allph=True) + d_sel = d.copy() + d_sel.burst_search(ph_sel=ph_sel, index_allph=False) + for times_sel, mask_sel, bursts_sel, times_allph, bursts_allph in zip( + d.iter_ph_times(ph_sel=ph_sel), + d.iter_ph_masks(ph_sel=ph_sel), + d_sel.mburst, + d.iter_ph_times(), + d.mburst): + assert (times_sel[bursts_sel.istart] == bursts_sel.start).all() + assert (times_sel[bursts_sel.istop] == bursts_sel.stop).all() + + assert (times_allph[bursts_allph.istart] == bursts_allph.start).all() + assert (times_allph[bursts_allph.istop] == bursts_allph.stop).all() + + # Test individual methods + bursts_allph2 = bursts_sel.recompute_index_expand(mask_sel) + assert bursts_allph2 == bursts_allph + assert (times_allph[bursts_allph2.istart] == bursts_allph2.start).all() + assert (times_allph[bursts_allph2.istop] == bursts_allph2.stop).all() + + bursts_sel2 = bursts_allph.recompute_index_reduce(times_sel) + assert (times_sel[bursts_sel2.istart] == bursts_sel2.start).all() + assert (times_sel[bursts_sel2.istop] == bursts_sel2.stop).all() + assert bursts_sel2 == bursts_sel + + # Test round-trip + bursts_allph3 = bursts_sel2.recompute_index_expand(mask_sel) + assert bursts_allph3 == bursts_allph2 + assert (times_allph[bursts_allph3.istart] == bursts_allph3.start).all() + assert (times_allph[bursts_allph3.istop] == bursts_allph3.stop).all() + +## This test is only used to develop alternative implementations of +## Bursts.recompute_index_reduce() and is normally disabled as it is very slow. +#def test_burst_recompute_index_reduce(data): +# """Test different versions of Bursts.recompute_index_reduce methods. +# +# This test is very slow so it's normally disabled. 
+# """ +# d = data +# ph_sel = Ph_sel(Dex='Aem') +# d.burst_search(ph_sel=ph_sel) +# d_sel = d.copy() +# d_sel.burst_search(ph_sel=ph_sel, index_allph=False) +# for times_sel, bursts_sel, times_allph, bursts_allph in zip( +# d.iter_ph_times(ph_sel=ph_sel), +# d_sel.mburst, +# d.iter_ph_times(), +# d.mburst): +# assert (times_allph[bursts_allph.istart] == bursts_allph.start).all() +# assert (times_allph[bursts_allph.istop] == bursts_allph.stop).all() +# +# bursts_sel1 = bursts_allph.recompute_index_reduce(times_sel) +# bursts_sel2 = bursts_allph.recompute_index_reduce2(times_sel) +# assert bursts_sel1 == bursts_sel2 +# assert bursts_sel == bursts_sel1 + + +def test_phrates_mtuple(data): + d = data + m = 10 + max_num_ph = 20001 + for ph in d.iter_ph_times(): + phc = ph[:max_num_ph] + rates = phrates.mtuple_rates(phc, m) + delays = phrates.mtuple_delays(phc, m) + t_rates = 0.5 * (phc[m-1:] + phc[:-m+1]) + assert phrates.mtuple_rates_max(phc, m) == rates.max() + assert phrates.mtuple_delays_min(phc, m) == delays.min() + assert phrates.default_c == 1 + assert (rates == (m - 1 - phrates.default_c) / delays).all() + assert (phrates.mtuple_rates_t(phc, m) == t_rates).all() + + +if has_numba: + def test_phrates_kde(data): + d = data + tau = 5000 # 5000 * 12.5ns = 6.25 us + for ph in d.iter_ph_times(): + # Test consistency of kde_laplace_nph and (kde_laplace, kde_rect) + rates = phrates.kde_laplace(ph, tau) + nrect = phrates.kde_rect(ph, tau*10) + ratesl, nph = phrates.nb.kde_laplace_nph(ph, tau) + assert (rates == ratesl).all() + assert (nph == nrect).all() + + # Test consistency of kde_laplace and _kde_laplace_self_numba + ratesl2, nph2 = phrates.nb.kde_laplace_self_numba(ph, tau) + assert (nph2 == nrect).all() + assert (ratesl2 == rates).all() + + # Smoke test laplace, gaussian, rect with time_axis + ratesl = phrates.kde_laplace(ph, tau, time_axis=ph+1) + assert ((ratesl >= 0) * (ratesl < 5e6)).all() + ratesg = phrates.kde_gaussian(ph, tau, time_axis=ph+1) + assert ((ratesg >= 0) * (ratesg < 5e6)).all() + ratesr = phrates.kde_rect(ph, tau, time_axis=ph+1) + assert ((ratesr >= 0) * (ratesr < 5e6)).all() + + def test_phrates_kde_cy(data): + d = data + tau = 5000 # 5000 * 12.5ns = 6.25 us + for ph in d.iter_ph_times(): + # Test consistency of kde_laplace_nph and (kde_laplace, kde_rect) + ratesg = phrates.nb.kde_gaussian_numba(ph, tau) + ratesl = phrates.nb.kde_laplace_numba(ph, tau) + ratesr = phrates.nb.kde_rect_numba(ph, tau) + ratesgc = phrates.cy.kde_gaussian_cy(ph, tau) + rateslc = phrates.cy.kde_laplace_cy(ph, tau) + ratesrc = phrates.cy.kde_rect_cy(ph, tau) + assert (ratesg == ratesgc).all() + assert (ratesl == rateslc).all() + assert (ratesr == ratesrc).all() + + +def test_burst_ph_data_functions(data): + """Tests the functions that iterate or operate on per-burst "ph-data". 
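+
+    Here ph is the per-channel photon timestamp array and bursts a Bursts
+    object: bursts.start/stop are the timestamps of the first/last photon
+    of each burst, while bursts.istart/istop are the corresponding indexes
+    into ph.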
+ """ + d = data + for bursts, ph, mask in zip(d.mburst, d.iter_ph_times(), + d.iter_ph_masks(Ph_sel(Dex='Dem'))): + bstart = bursts.start + bend = bursts.stop + + for i, (start, stop) in enumerate(bl.iter_bursts_start_stop(bursts)): + assert ph[start] == bstart[i] + assert ph[stop-1] == bend[i] + + for i, burst_ph in enumerate(bl.iter_bursts_ph(ph, bursts)): + assert burst_ph[0] == bstart[i] + assert burst_ph[-1] == bend[i] + + for i, burst_ph in enumerate(bl.iter_bursts_ph(ph, bursts, mask=mask)): + if burst_ph.size > 0: + assert burst_ph[0] >= bstart[i] + assert burst_ph[-1] <= bend[i] + + stats = bl.burst_ph_stats(ph, bursts, mask=mask) + assert (stats[~np.isnan(stats)] >= bstart[~np.isnan(stats)]).all() + assert (stats[~np.isnan(stats)] <= bend[~np.isnan(stats)]).all() + + bistart = bursts.istart + biend = bursts.istop + bursts_mask = bl.ph_in_bursts_mask(ph.size, bursts) + for i, (start, stop) in enumerate(bl.iter_bursts_start_stop(bursts)): + assert bursts_mask[start:stop].all() + if start > 0: + if i > 0 and biend[i-1] < bistart[i] - 1: + assert not bursts_mask[start - 1] + if stop < ph.size: + if i < bistart.size-1 and bistart[i+1] > biend[i] + 1: + assert not bursts_mask[stop] + + +def test_ph_in_bursts_ich(data): + """Tests the ph_in_bursts_ich method. + """ + d = data + for ich in range(d.nch): + ph_in_bursts = d.ph_in_bursts_ich(ich) + ph_in_bursts_dd = d.ph_in_bursts_ich(ich, ph_sel=Ph_sel(Dex='Dem')) + assert ph_in_bursts_dd.size < ph_in_bursts.size + + +def test_burst_fuse(data): + """Test 2 independent implementations of fuse_bursts for consistency. + """ + d = data + for bursts in d.mburst: + new_mbursti = bl.fuse_bursts_iter(bursts, ms=1) + new_mburstd = bl.fuse_bursts_direct(bursts, ms=1) + assert new_mbursti == new_mburstd + + +def test_burst_fuse_0ms(data): + """Test that after fusing with ms=0 the sum of bursts sizes is that same + as the number of ph in bursts (via burst selection). + """ + d = data + if d.nch == 8: + d.burst_search(L=10, m=10, F=7, computefret=False) + d.mburst[1] = bl.bslib.Bursts.empty() # Make one channel with no bursts + d._calc_burst_period() + d.calc_fret(count_ph=True) + df = d.fuse_bursts(ms=0) + for ich, bursts in enumerate(df.mburst): + mask = bl.ph_in_bursts_mask(df.ph_data_sizes[ich], bursts) + assert mask.sum() == bursts.counts.sum() + df.calc_fret(count_ph=True) + assert len(df.mburst) == len(d.mburst) + assert len(df.mburst) == d.nch + + +def test_burst_fuse_separation(data): + """Test that after fusing bursts the minimum separation is equal + to the threshold used during fusing. 
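+
+    Separations are stored in clock periods, so they are converted to
+    seconds with clk_p before being compared against fuse_ms * 1e-3.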
+ """ + d = data + fuse_ms = 2 + df = d.fuse_bursts(ms=fuse_ms) + for bursts in df.mburst: + separation = bursts.separation * df.clk_p + if bursts.num_bursts > 0: + assert separation.min() >= fuse_ms * 1e-3 + + +def test_calc_sbr(data): + """Smoke test Data.calc_sbr()""" + data.calc_sbr() + + +def test_calc_max_rate(data): + """Smoke test for Data.calc_max_rate()""" + data.calc_max_rate(m=10) + if data.alternated: + data.calc_max_rate(m=10, ph_sel=Ph_sel(Dex='DAem'), compact=True) + + +def test_burst_data(data): + """Test for bext.burst_data()""" + bext.burst_data(data, include_bg=True, include_ph_index=True) + bext.burst_data(data, include_bg=False, include_ph_index=True) + bext.burst_data(data, include_bg=True, include_ph_index=False) + bext.burst_data(data, include_bg=False, include_ph_index=False) + + +def test_print_burst_stats(data): + """Smoke test for burstlib.print_burst_stats()""" + bl.print_burst_stats(data) + + +def test_expand(data): + """Test method `expand()` for `Data()`.""" + d = data + for ich, bursts in enumerate(d.mburst): + if bursts.num_bursts == 0: + continue # if no bursts skip this ch + nd, na, bg_d, bg_a, width = d.expand(ich, width=True) + width2 = bursts.width * d.clk_p + period = d.bp[ich] + bg_d2 = d.bg_from(Ph_sel(Dex='Dem'))[ich][period] * width2 + bg_a2 = d.bg_from(Ph_sel(Dex='Aem'))[ich][period] * width2 + assert (width == width2).all() + assert (nd == d.nd[ich]).all() and (na == d.na[ich]).all() + assert (bg_d == bg_d2).all() and (bg_a == bg_a2).all() + + +def test_burst_data_ich(data): + """Test method `Data.burst_data_ich()`.""" + d = data + for ich, bursts in enumerate(d.mburst): + if bursts.num_bursts == 0: + continue # if no bursts skip this ch + burst_dict = d.burst_data_ich(ich=ich) + assert (burst_dict['size_raw'] == bursts.counts).all() + assert (burst_dict['t_start'] == bursts.start * d.clk_p).all() + assert (burst_dict['t_stop'] == bursts.stop * d.clk_p).all() + assert (burst_dict['i_start'] == bursts.istart).all() + assert (burst_dict['i_stop'] == bursts.istop).all() + assert (burst_dict['bg_period'] == d.bp[ich]).all() + nd, na, bg_d, bg_a, width = d.expand(ich, width=True) + width_ms = width * 1e3 + assert (width_ms == burst_dict['width_ms']).all() + assert (nd == burst_dict['nd']).all() + assert (na == burst_dict['na']).all() + assert (bg_d == burst_dict['bg_dd']).all() + assert (bg_a == burst_dict['bg_ad']).all() + if d.alternated: + period = d.bp[ich] + bg_da = d.bg_from(Ph_sel(Aex='Dem'))[ich][period] * width + bg_aa = d.bg_from(Ph_sel(Aex='Aem'))[ich][period] * width + assert (bg_da == burst_dict['bg_da']).all() + assert (bg_aa == burst_dict['bg_aa']).all() + + +def test_burst_corrections(data): + """Test background and bleed-through corrections.""" + d = data + d.calc_ph_num(alex_all=True) + d.corrections() + leakage = d.get_leakage_array() + + for ich, bursts in enumerate(d.mburst): + if bursts.num_bursts == 0: continue # if no bursts skip this ch + nd, na, bg_d, bg_a, width = d.expand(ich, width=True) + burst_size_raw = bursts.counts + + lk = leakage[ich] + if d.alternated: + nda, naa = d.nda[ich], d.naa[ich] + period = d.bp[ich] + bg_da = d.bg_from(Ph_sel(Aex='Dem'))[ich][period]*width + bg_aa = d.bg_from(Ph_sel(Aex='Aem'))[ich][period]*width + burst_size_raw2 = (nd + na + bg_d + bg_a + lk*nd + nda + naa + + bg_da + bg_aa) + assert np.allclose(burst_size_raw, burst_size_raw2) + else: + burst_size_raw2 = nd + na + bg_d + bg_a + lk*nd + assert np.allclose(burst_size_raw, burst_size_raw2) + + +def test_burst_search_consistency(data): + 
"""Test consistency of burst data array + """ + d = data + for mb, ph in zip(d.mburst, d.iter_ph_times()): + tot_size = mb.counts + istart, istop = mb.istart, mb.istop + assert np.all(tot_size == istop - istart + 1) + start, stop, width = mb.start, mb.stop, mb.width + assert np.all(width == stop - start) + df = d.fuse_bursts(ms=0) + for mb, ph in zip(df.mburst, df.iter_ph_times()): + tot_size = mb.counts + istart, istop = mb.istart, mb.istop + assert np.all(tot_size == istop - istart + 1) + start, stop, width = mb.start, mb.stop, mb.width + assert np.all(width == stop - start) + df = d.fuse_bursts(ms=1) + for mb, ph in zip(df.mburst, df.iter_ph_times()): + tot_size = mb.counts + istart, istop = mb.istart, mb.istop + assert np.all(tot_size <= istop - istart + 1) + start, stop, width = mb.start, mb.stop, mb.width + assert np.all(width <= stop - start) + + +def test_E_and_S_with_corrections(data): + d = data + gamma = 0.5 + beta = 0.7 + d.gamma = gamma + d.beta = beta + for i, (E, nd, na) in enumerate(zip(d.E, d.nd, d.na)): + assert (E == na / (nd * gamma + na)).all() + if d.alternated: + naa = d.naa[i] + if 'PAX' in data.meas_type: + naa = d.naa[i] - d.nar[i] + assert (d.S[i] == (gamma * nd + na) / + (gamma * nd + na + naa / beta)).all() + + +def test_burst_size_da(data): + """Test that nd + na with no corrections is equal to b_size(mburst). + """ + d = data + d.calc_ph_num(alex_all=True) + if d.alternated: + for mb, nd, na, naa, nda in zip(d.mburst, d.nd, d.na, d.naa, d.nda): + tot_size = mb.counts + tot_size2 = nd + na + naa + nda + assert np.allclose(tot_size, tot_size2) + else: + for mb, nd, na in zip(d.mburst, d.nd, d.na): + tot_size = mb.counts + assert (tot_size == nd + na).all() + + +def test_burst_selection(data): + """Smoke test for burst selection methods. + """ + d = data + d.select_bursts(select_bursts.size, th1=20, th2=100, add_naa=True) + d.select_bursts(select_bursts.size, th1=20, th2=100, gamma=0.5) + + M1 = d.select_bursts_mask(select_bursts.consecutive, th1=1e-3, th2=1e4, + kind='first') + M2 = d.select_bursts_mask(select_bursts.consecutive, th1=1e-3, th2=1e4, + kind='second') + Mb = d.select_bursts_mask(select_bursts.consecutive, th1=1e-3, th2=1e4, + kind='both') + Mb2 = [m1 + m2 for m1, m2 in zip(M1, M2)] + assert list_array_equal(Mb, Mb2) + + +def test_burst_selection_nocorrections(data): + """Test burst selection with uncorrected bursts. + """ + d = data + d.burst_search(computefret=False) + d.calc_fret(count_ph=True, corrections=False) + ds1 = d.select_bursts(select_bursts.size, th1=20, th2=100, + computefret=False) + ds2 = d.select_bursts(select_bursts.size, th1=20, th2=100) + ds2.calc_ph_num() + ds2.calc_fret(corrections=False) + + assert list_array_equal(ds1.nd, ds2.nd) + assert list_array_equal(ds1.na, ds2.na) + assert list_array_equal(ds1.E, ds2.E) + if d.alternated: + assert list_array_equal(ds1.naa, ds2.naa) + assert list_array_equal(ds1.E, ds2.E) + + +def test_burst_selection_ranges(data): + """Test selection functions having a min-max range. 
+ """ + d = data + d.burst_search() + d.calc_max_rate(m=10, ph_sel=Ph_sel(Dex='DAem')) + + Range = namedtuple('Range', ['min', 'max', 'getter']) + + sel_functions = dict( + E=Range(0.5, 1, None), nd=Range(30, 40, None), na=Range(30, 40, None), + time=Range(1, 61, lambda d, ich: d.mburst[ich].start * d.clk_p), + width=Range(0.5, 1.5, lambda d, ich: d.mburst[ich].width * d.clk_p*1e3), + peak_phrate=Range(50e3, 150e3, lambda d, ich: d.max_rate[ich])) + if d.alternated: + sel_functions.update(naa=Range(30, 40, None), S=Range(0.3, 0.7, None)) + + for func_name, range_ in sel_functions.items(): + func = getattr(select_bursts, func_name) + getter = range_.getter + if getter is None: + getter = lambda d, ich: d[func_name][ich] + + ds = d.select_bursts(func, args=(range_.min, range_.max)) + for ich in range(d.nch): + selected = getter(ds, ich) + assert ((selected >= range_.min) * (selected <= range_.max)).all() + + +def test_join_data(data): + """Smoke test for bext.join_data() function. + """ + d = data + dj = bext.join_data([d, d.copy()]) + assert (dj.num_bursts == 2 * d.num_bursts).all() + for bursts in dj.mburst: + assert (np.diff(bursts.start) > 0).all() + + +def test_collapse(data_8ch): + """Test the .collapse() method that joins the ch. + """ + d = data_8ch + dc1 = d.collapse() + bursts1 = dc1.mburst[0] + bursts2 = bl.bslib.Bursts.merge(d.mburst, sort=True) + assert bursts1 == bursts2 + bursts2 = bl.bslib.Bursts.merge(d.mburst, sort=False) + indexsort_stop = bursts2.stop.argsort() + bursts3 = bursts2[indexsort_stop] + indexsort_start = bursts3.start.argsort() + bursts4 = bursts3[indexsort_start] + assert bursts1 == bursts4 + indexsort = np.lexsort((bursts2.stop, bursts2.start)) + for name in d.burst_fields: + if name not in d or name == 'mburst': + continue + newfield = np.hstack(d[name])[indexsort] + assert np.allclose(dc1[name][0], newfield) + + dc2 = d.collapse(update_gamma=False) + for name in d.burst_fields: + if name not in d: continue + + if name == 'mburst': + assert dc1.mburst[0] == dc2.mburst[0] + else: + assert np.allclose(dc1[name][0], dc2[name][0]) + +if __name__ == '__main__': + pytest.main("-x -v fretbursts/tests/test_burstlib.py") + +from handlers import host_post_start +from handlers import host_pre_stop +from handlers import _set_send_node_event_on_error_handler +from handlers import build_persistent_event_tasks +from handlers import build_wf_event_task + + +def _get_nodes_instances(ctx, node_id): + instances = [] + for node in ctx.nodes: + for instance in node.instances: + if instance.node_id == node_id: + instances.append(instance) + return instances + + +def _get_all_nodes(ctx): + nodes = set() + for node in ctx.nodes: + nodes.add(node) + return nodes + + +def _get_all_modified_node_instances(_nodes, modification): + instances = set() + for node in _nodes: + for instance in node.instances: + if instance.modification == modification: + instances.add(instance) + return instances + + +def _get_all_nodes_instances(ctx): + node_instances = set() + for node in ctx.nodes: + for instance in node.instances: + node_instances.add(instance) + return node_instances + + +def _get_node_instance(ctx, node_id, instance_id): + for node in ctx.nodes: + if node.id == node_id: + for instance in node.instances: + if instance.id == instance_id: + return instance + return None + + +def _get_all_nodes_instances_from_nodes(nodes): + node_instances = set() + for node in nodes: + for instance in node.instances: + node_instances.add(instance) + return node_instances + + +def 
_get_nodes_instances_from_nodes(nodes, node_id): + instances = [] + for node in nodes: + for instance in node.instances: + if instance.node_id == node_id: + instances.append(instance) + return instances + + +def set_state_task(ctx, graph, node_id, state_name, step_id, custom_context): + #ctx.internal.send_workflow_event( + # event_type='other', + # message="call: set_state_task(node_id: {0}, state_name: {1}, step_id: {2})".format(node_id, state_name, step_id)) + sequence = _set_state_task(ctx, graph, node_id, state_name, step_id, custom_context) + if sequence is not None: + sequence.name = step_id + # start = ctx.internal.send_workflow_event(event_type='custom_workflow', message=build_wf_event(WfEvent(step_id, "in"))) + # sequence.set_head(start) + # end = ctx.internal.send_workflow_event(event_type='custom_workflow', message=build_wf_event(WfEvent(step_id, "ok"))) + # sequence.add(end) + custom_context.tasks[step_id] = sequence + + +def _set_state_task(ctx, graph, node_id, state_name, step_id, custom_context): + #ctx.internal.send_workflow_event( + # event_type='other', + # message="call: _set_state_task(node_id: {0}, state_name: {1}, step_id: {2})".format(node_id, state_name, step_id)) + sequence = None + instances = custom_context.modified_instances_per_node.get(node_id, []) + instance_count = len(instances) + if instance_count == 1: + instance = instances[0] + sequence = set_state_task_for_instance(ctx, graph, node_id, instance, state_name, step_id) + elif instance_count > 1: + fork = ForkjoinWrapper(graph) + for instance in instances: + fork.add(set_state_task_for_instance(ctx, graph, node_id, instance, state_name, step_id)) + msg = "state {0} on all {1} node instances".format(state_name, node_id) + sequence = forkjoin_sequence(graph, fork, instances[0], msg) + #ctx.internal.send_workflow_event( + # event_type='other', + # message="return: _set_state_task(node_id: {0}, state_name: {1}, step_id: {2}): instance_count: {3}, sequence: {4}".format(node_id, state_name, step_id, instance_count, sequence)) + return sequence + + +def set_state_task_for_instance(ctx, graph, node_id, instance, state_name, step_id): + #ctx.internal.send_workflow_event( + # event_type='other', + # message="call: set_state_task_for_instance(node_id: {0}, state_name: {1}, step_id: {2}, instance: {3})".format(node_id, state_name, step_id, instance)) + task = TaskSequenceWrapper(graph) + task.add(build_wf_event_task(instance, step_id, "in")) + task.add(instance.set_state(state_name)) + task.add(build_wf_event_task(instance, step_id, "ok")) + #ctx.internal.send_workflow_event( + # event_type='other', + # message="return: set_state_task_for_instance(node_id: {0}, state_name: {1}, step_id: {2}, instance: {3})".format(node_id, state_name, step_id, instance)) + return task + + +def operation_task(ctx, graph, node_id, operation_fqname, step_id, custom_context): + sequence = _operation_task(ctx, graph, node_id, operation_fqname, step_id, custom_context) + if sequence is not None: + sequence.name = step_id + # start = ctx.internal.send_workflow_event(event_type='custom_workflow', message=build_wf_event(WfEvent(step_id, "in"))) + # sequence.set_head(start) + # end = ctx.internal.send_workflow_event(event_type='custom_workflow', message=build_wf_event(WfEvent(step_id, "ok"))) + # sequence.add(end) + custom_context.tasks[step_id] = sequence + + +def _operation_task(ctx, graph, node_id, operation_fqname, step_id, custom_context): + sequence = None + instances = custom_context.modified_instances_per_node.get(node_id, []) + 
first_instance = None + instance_count = len(instances) + if instance_count == 1: + instance = instances[0] + first_instance = instance + sequence = operation_task_for_instance(ctx, graph, node_id, instance, operation_fqname, step_id, custom_context) + elif instance_count > 1: + fork = ForkjoinWrapper(graph) + for instance in instances: + instance_task = operation_task_for_instance(ctx, graph, node_id, instance, operation_fqname, step_id, custom_context) + fork.add(instance_task) + msg = "operation {0} on all {1} node instances".format(operation_fqname, node_id) + first_instance = instances[0] + sequence = forkjoin_sequence(graph, fork, first_instance, msg) + return sequence + +def count_relationships(instance): + relationship_count = 0 + for relationship in instance.relationships: + relationship_count += 1 + return relationship_count + + +def should_call_relationship_op(ctx, relation_ship_instance): + result = False + source_host_instance = __get_host(ctx, relation_ship_instance.node_instance) + target_host_instance = __get_host(ctx, relation_ship_instance.target_node_instance) + if source_host_instance.id == target_host_instance.id: + # source and target are on the same instance > so the relation is considered + result = True + elif source_host_instance.node_id != target_host_instance.node_id: + # source and target are not on the same node > so the relation is considered + result = True + # source and target are on the same node but different instance (cross relationship are forbidden) + else: + result = False + ctx.internal.send_workflow_event( + event_type='other', + message="Filtering cross relationship src: instance id {0}, host ins id: {1}, host node id: {2} tgt instance id {3}, host ins id: {4}, host node id: {5}, result is {6}".format( + relation_ship_instance.node_instance.id, + source_host_instance.id, + source_host_instance.node_id, + relation_ship_instance.target_node_instance.id, + target_host_instance.id, + target_host_instance.node_id, + result + )) + return result + + +# Find the host for this instance. When the instance comes from a modification context +# the relationships are partial (only the relationships concerned by the modification are +# returned for old instances concerned by the modification), that's why we sometimes look for host in the context. +def __get_host(ctx, instance): + host = __recursively_get_host(instance) + if _is_host_node_instance(host): + return host + else: + # the host instance can not be detected in this partial context (modification related to scaling) + # so we we'll explore the host hierarchy from the context + # fisrt of all, we search for the instance in the context + instance_from_ctx = _get_node_instance(ctx, instance.node_id, instance.id) + if instance_from_ctx is None: + # the host is not a real one BUT the instance is a new instance coming from modification + # (can not be found from context) + return host + else: + return __recursively_get_host(instance_from_ctx) + + +def __recursively_get_host(instance): + host = None + if instance.relationships: + for relationship in instance.relationships: + if relationship.relationship.is_derived_from('cloudify.relationships.contained_in'): + host = relationship.target_node_instance + if host is not None: + return __recursively_get_host(host) + else: + return instance + + +# check if the pre/post configure source/target operation should be called. +# return true if the operation has not been called yet and then, register it as called. 
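+# Calls are de-duplicated through custom_context.executed_operation, keyed as
+# '<source_node_id>#<target_node_id>#<pre|post>_configure_<source|target>#<operation target instance id>'.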
+def __check_and_register_call_config_arround(ctx, custom_context, relationship_instance, source_or_target, pre_or_post): + source_node_id = relationship_instance.node_instance.node_id + target_node_id = relationship_instance.target_node_instance.node_id + operation_target_instance_id = relationship_instance.node_instance.id + if source_or_target == 'target': + operation_target_instance_id = relationship_instance.target_node_instance.id + operation_id = source_node_id + '#' + target_node_id + '#' + pre_or_post + '_configure_' + source_or_target + '#' + operation_target_instance_id + result = False + cause = 'No known cause' + if custom_context.is_native_node(source_node_id) and source_or_target == 'source': + result = True + cause = 'source is a native node' + elif custom_context.is_native_node(target_node_id) and source_or_target == 'target': + result = True + cause = 'target is a native node' + elif source_or_target == 'source' and relationship_instance.node_instance.id not in custom_context.modified_instance_ids: + result = False + cause = 'source instance already exists (so arround operation has already been called)' + elif source_or_target == 'target' and relationship_instance.target_node_instance.id not in custom_context.modified_instance_ids: + result = False + cause = 'target instance already exists (so arround operation has already been called)' + else: + if operation_id in custom_context.executed_operation: + result = False + cause = 'operation has already been called' + else: + custom_context.executed_operation.add(operation_id) + result = True + cause = 'operation has not been called yet' + ctx.internal.send_workflow_event( + event_type='other', + message="Filtering arround conf operation {0}, result is {1} ({2})".format( + operation_id, + result, cause + )) + return result + + +def operation_task_for_instance(ctx, graph, node_id, instance, operation_fqname, step_id, custom_context): + sequence = TaskSequenceWrapper(graph) + sequence.add(build_wf_event_task(instance, step_id, "in")) + relationship_count = count_relationships(instance) + if operation_fqname == 'cloudify.interfaces.lifecycle.start': + sequence.add(instance.execute_operation(operation_fqname)) + if _is_host_node_instance(instance): + sequence.add(*host_post_start(ctx, instance)) + fork = ForkjoinWrapper(graph) + fork.add(instance.execute_operation('cloudify.interfaces.monitoring.start')) + as_target_relationships = custom_context.relationship_targets.get(instance.id, set()) + host_instance = None + if relationship_count > 0 or len(as_target_relationships) > 0: + for relationship in instance.relationships: + # add a condition in order to test if it's a 1-1 rel + if should_call_relationship_op(ctx, relationship): + fork.add(relationship.execute_source_operation('cloudify.interfaces.relationship_lifecycle.establish')) + # if the target of the relation is not in modified instances, we should call the target.add_source + #if relationship.target_node_instance.id not in custom_context.modified_instance_ids: + fork.add(relationship.execute_target_operation('cloudify.interfaces.relationship_lifecycle.establish')) + if 'cloudify.nodes.Volume' in instance.node.type_hierarchy: + ctx.logger.info("[MAPPING] instance={} hierarchy={}".format(instance.id, instance.node.type_hierarchy)) + host_instance = __get_host(ctx, relationship.target_node_instance) + for relationship in as_target_relationships: + # add a condition in order to test if it's a 1-1 rel + if should_call_relationship_op(ctx, relationship): + if 
relationship.node_instance.id not in custom_context.modified_instance_ids: + fork.add(relationship.execute_target_operation('cloudify.interfaces.relationship_lifecycle.establish')) + if relationship.node_instance.id not in custom_context.modified_instance_ids: + fork.add(relationship.execute_source_operation('cloudify.interfaces.relationship_lifecycle.establish')) + sequence.add( + instance.send_event("Start monitoring on node '{0}' instance '{1}'".format(node_id, instance.id)), + forkjoin_sequence(graph, fork, instance, "establish") + ) + if host_instance is not None and host_instance.id == instance.id: + ctx.logger.info("[MAPPING] Do nothing it is the same instance: host_instance.id={} instance.id={}".format(host_instance.id, instance.id)) + elif host_instance is not None and 'alien4cloud.mapping.device.execute' in host_instance.node.operations: + sequence.add(host_instance.send_event("Updating device attribute for instance {0} and volume {0}".format(host_instance.id, instance.id))) + sequence.add(host_instance.execute_operation("alien4cloud.mapping.device.execute", kwargs={'volume_instance_id': instance.id})) + elif operation_fqname == 'cloudify.interfaces.lifecycle.configure': + as_target_relationships = custom_context.relationship_targets.get(instance.id, set()) + if relationship_count > 0 or len(as_target_relationships) > 0: + has_preconfigure_tasks = False + preconfigure_tasks = ForkjoinWrapper(graph) + preconfigure_tasks.add(instance.send_event("preconfiguring task for instance {0}'".format(instance.id))) + for relationship in instance.relationships: + # add a condition in order to test if it's a 1-1 rel + if should_call_relationship_op(ctx, relationship): + if __check_and_register_call_config_arround(ctx, custom_context, relationship, 'source', 'pre'): + preconfigure_tasks.add(relationship.execute_source_operation('cloudify.interfaces.relationship_lifecycle.preconfigure')) + has_preconfigure_tasks = True + for relationship in as_target_relationships: + # add a condition in order to test if it's a 1-1 rel + if should_call_relationship_op(ctx, relationship): + if __check_and_register_call_config_arround(ctx, custom_context, relationship, 'target', 'pre'): + preconfigure_tasks.add(relationship.execute_target_operation('cloudify.interfaces.relationship_lifecycle.preconfigure')) + has_preconfigure_tasks = True + if has_preconfigure_tasks: + sequence.add(forkjoin_sequence(graph, preconfigure_tasks, instance, "preconf for {0}".format(instance.id))) + # the configure operation call itself + sequence.add(instance.execute_operation(operation_fqname)) + if relationship_count > 0 or len(as_target_relationships) > 0: + has_postconfigure_tasks = False + postconfigure_tasks = ForkjoinWrapper(graph) + postconfigure_tasks.add(instance.send_event("postconfiguring task for instance {0}'".format(instance.id))) + for relationship in instance.relationships: + # add a condition in order to test if it's a 1-1 rel + if should_call_relationship_op(ctx, relationship): + if __check_and_register_call_config_arround(ctx, custom_context, relationship, 'source', 'post'): + postconfigure_tasks.add(relationship.execute_source_operation('cloudify.interfaces.relationship_lifecycle.postconfigure')) + has_postconfigure_tasks = True + for relationship in as_target_relationships: + # add a condition in order to test if it's a 1-1 rel + if should_call_relationship_op(ctx, relationship): + if __check_and_register_call_config_arround(ctx, custom_context, relationship, 'target', 'post'): + task = 
relationship.execute_target_operation('cloudify.interfaces.relationship_lifecycle.postconfigure') + _set_send_node_event_on_error_handler(task, instance, "Error occurred while postconfiguring node as target for relationship {0} - ignoring...".format(relationship)) + postconfigure_tasks.add(task) + has_postconfigure_tasks = True + if has_postconfigure_tasks: + sequence.add(forkjoin_sequence(graph, postconfigure_tasks, instance, "postconf for {0}".format(instance.id))) + + persistent_event_tasks = build_persistent_event_tasks(instance) + if persistent_event_tasks is not None: + sequence.add(*persistent_event_tasks) + + elif operation_fqname == 'cloudify.interfaces.lifecycle.stop': + if _is_host_node_instance(instance): + sequence.add(*host_pre_stop(instance)) + task = instance.execute_operation(operation_fqname) + _set_send_node_event_on_error_handler(task, instance, "Error occurred while stopping node - ignoring...") + sequence.add(task) + as_target_relationships = custom_context.relationship_targets.get(instance.id, set()) + # now call unlink onto relations' target + if relationship_count > 0 or len(as_target_relationships) > 0: + fork = ForkjoinWrapper(graph) + for relationship in instance.relationships: + # add a condition in order to test if it's a 1-1 rel + if should_call_relationship_op(ctx, relationship): + unlink_task_source = relationship.execute_source_operation('cloudify.interfaces.relationship_lifecycle.unlink') + _set_send_node_event_on_error_handler(unlink_task_source, instance, "Error occurred while unlinking node from target {0} - ignoring...".format(relationship.target_id)) + fork.add(unlink_task_source) + # call unlink on the target of the relationship + unlink_task_target = relationship.execute_target_operation('cloudify.interfaces.relationship_lifecycle.unlink') + _set_send_node_event_on_error_handler(unlink_task_target, instance, "Error occurred while unlinking node from target {0} - ignoring...".format(relationship.target_id)) + fork.add(unlink_task_target) + for relationship in as_target_relationships: + # add a condition in order to test if it's a 1-1 rel + if should_call_relationship_op(ctx, relationship): + if relationship.node_instance.id not in custom_context.modified_instance_ids: + unlink_task_source = relationship.execute_source_operation('cloudify.interfaces.relationship_lifecycle.unlink') + _set_send_node_event_on_error_handler(unlink_task_source, instance, "Error occurred while unlinking node from target {0} - ignoring...".format(relationship.target_id)) + fork.add(unlink_task_source) + + if fork.is_not_empty(): + sequence.add(forkjoin_sequence(graph, fork, instance, "unlink")) + + elif operation_fqname == 'cloudify.interfaces.lifecycle.delete': + task = instance.execute_operation(operation_fqname) + _set_send_node_event_on_error_handler(task, instance, "Error occurred while deleting node - ignoring...") + sequence.add(task) + else: + # the default behavior : just do the job + sequence.add(instance.execute_operation(operation_fqname)) + sequence.add(build_wf_event_task(instance, step_id, "ok")) + return sequence + + +def forkjoin_sequence(graph, forkjoin_wrapper, instance, label): + sequence = TaskSequenceWrapper(graph) + sequence.add(instance.send_event("forking: {0} instance '{1}'".format(label, instance.id))) + sequence.add(forkjoin_wrapper) + sequence.add(instance.send_event("joining: {0} instance '{1}'".format(label, instance.id))) + return sequence + + +def link_tasks(graph, source_id, target_id, custom_context): + sources = 
custom_context.tasks.get(source_id, None) + targets = custom_context.tasks.get(target_id, None) + _link_tasks(graph, sources, targets) + + +def _link_tasks(graph, sources, targets): + if sources is None: + return + if isinstance(sources, TaskSequenceWrapper) or isinstance(sources, ForkjoinWrapper): + sources = sources.first_tasks + else: + sources = [sources] + if targets is None: + return + if isinstance(targets, TaskSequenceWrapper) or isinstance(targets, ForkjoinWrapper): + targets = targets.last_tasks + else: + targets = [targets] + for source in sources: + for target in targets: + graph.add_dependency(source, target) + + +def _is_host_node_instance(node_instance): + return is_host_node(node_instance.node) + + +def is_host_node(node): + return 'cloudify.nodes.Compute' in node.type_hierarchy + + +# def _relationship_operations(node_instance, operation): +# tasks_with_targets = _relationship_operations_with_targets( +# node_instance, operation) +# return [task for task, _ in tasks_with_targets] +# +# +# def _relationship_operations_with_targets(node_instance, operation): +# tasks = [] +# for relationship in node_instance.relationships: +# tasks += _relationship_operations_with_target(relationship, operation) +# return tasks +# +# +# def _relationship_operations_with_target(relationship, operation): +# return [ +# (relationship.execute_source_operation(operation), +# relationship.target_id) +# ] + +def generate_native_node_workflows(ctx, graph, custom_context, stage): + #ctx.internal.send_workflow_event( + # event_type='other', + # message="call: generate_native_node_workflows(stage: {0})".format(stage)) + native_node_ids = custom_context.get_native_node_ids() + # for each native node we build a sequence of operations + native_sequences = {} + for node_id in native_node_ids: + sequence = _generate_native_node_sequence(ctx, graph, node_id, stage, custom_context) + if sequence is not None: + native_sequences[node_id] = sequence + # we explore the relations between native nodes to orchestrate tasks 'a la' cloudify + for node_id in native_node_ids: + sequence = native_sequences.get(node_id, None) + if sequence is not None: + node = ctx.get_node(node_id) + for relationship in node.relationships: + target_id = relationship.target_id + target_sequence = native_sequences.get(target_id, None) + if target_sequence is not None: + if stage == 'install': + _link_tasks(graph, sequence, target_sequence) + elif stage == 'uninstall': + _link_tasks(graph, target_sequence, sequence) + # when posible, associate the native sequences with the corresponding delegate workflow step + for node_id in native_node_ids: + sequence = native_sequences.get(node_id, None) + if sequence is not None: + delegate_wf_step = custom_context.delegate_wf_steps.get(node_id, None) + if delegate_wf_step is not None: + # the delegate wf step can be associated to a native sequence + # let's register it in the custom context to make it available for non native tasks links + custom_context.tasks[delegate_wf_step] = sequence + # and remove it from the original map + del custom_context.delegate_wf_steps[node_id] + # this sequence is now associated with a delegate wf step, just remove it from the map + del native_sequences[node_id] + # iterate through remaining delegate_wf_steps + # the remaining ones are those that are not associated with a native sequence + # at this stage, we are not able to associate these remaining delegate wf steps (we don't have + # a bridge between java world model and python world model (cfy blueprint) ) + # so: we 
fork all remaining sequences and we associate the fork-join to all remaining delegate step + if len(custom_context.delegate_wf_steps) > 0 and len(native_sequences) > 0: + # let's create a fork join with remaining sequences + fork = ForkjoinWrapper(graph) + for sequence in native_sequences.itervalues(): + fork.add(sequence) + for stepId in custom_context.delegate_wf_steps.itervalues(): + # we register this fork using the delegate wf step id + # so it can be referenced later to link non native tasks + custom_context.tasks[stepId] = fork + #ctx.internal.send_workflow_event( + # event_type='other', + # message="return: generate_native_node_workflows") + +def _generate_native_node_sequence(ctx, graph, node_id, stage, custom_context): + #ctx.internal.send_workflow_event( + # event_type='other', + # message="call: _generate_native_node_sequence(node: {0}, stage: {1})".format(node, stage)) + if stage == 'install': + return _generate_native_node_sequence_install(ctx, graph, node_id, custom_context) + elif stage == 'uninstall': + return _generate_native_node_sequence_uninstall(ctx, graph, node_id, custom_context) + else: + return None + + +def _generate_native_node_sequence_install(ctx, graph, node_id, custom_context): + #ctx.internal.send_workflow_event( + # event_type='other', + # message="call: _generate_native_node_sequence_install(node: {0})".format(node)) + sequence = TaskSequenceWrapper(graph) + sequence.add(_set_state_task(ctx, graph, node_id, 'initial', '_{0}_initial'.format(node_id), custom_context)) + sequence.add(_set_state_task(ctx, graph, node_id, 'creating', '_{0}_creating'.format(node_id), custom_context)) + sequence.add(_operation_task(ctx, graph, node_id, 'cloudify.interfaces.lifecycle.create', '_create_{0}'.format(node_id), custom_context)) + sequence.add(_set_state_task(ctx, graph, node_id, 'created', '_{0}_created'.format(node_id), custom_context)) + sequence.add(_set_state_task(ctx, graph, node_id, 'configuring', '_{0}_configuring'.format(node_id), custom_context)) + sequence.add(_operation_task(ctx, graph, node_id, 'cloudify.interfaces.lifecycle.configure', '_configure_{0}'.format(node_id), custom_context)) + sequence.add(_set_state_task(ctx, graph, node_id, 'configured', '_{0}_configured'.format(node_id), custom_context)) + sequence.add(_set_state_task(ctx, graph, node_id, 'starting', '_{0}_starting'.format(node_id), custom_context)) + sequence.add(_operation_task(ctx, graph, node_id, 'cloudify.interfaces.lifecycle.start', '_start_{0}'.format(node_id), custom_context)) + sequence.add(_set_state_task(ctx, graph, node_id, 'started', '_{0}_started'.format(node_id), custom_context)) + #ctx.internal.send_workflow_event( + # event_type='other', + # message="return: _generate_native_node_sequence_install(node: {0})".format(node)) + return sequence + + +def _generate_native_node_sequence_uninstall(ctx, graph, node_id, custom_context): + #ctx.internal.send_workflow_event( + # event_type='other', + # message="call: _generate_native_node_sequence_uninstall(node: {0})".format(node)) + sequence = TaskSequenceWrapper(graph) + sequence.add(_set_state_task(ctx, graph, node_id, 'stopping', '_{0}_stopping'.format(node_id), custom_context)) + sequence.add(_operation_task(ctx, graph, node_id, 'cloudify.interfaces.lifecycle.stop', '_stop_{0}'.format(node_id), custom_context)) + sequence.add(_set_state_task(ctx, graph, node_id, 'stopped', '_{0}_stopped'.format(node_id), custom_context)) + sequence.add(_set_state_task(ctx, graph, node_id, 'deleting', '_{0}_deleting'.format(node_id), 
custom_context)) + sequence.add(_operation_task(ctx, graph, node_id, 'cloudify.interfaces.lifecycle.delete', '_delete_{0}'.format(node_id), custom_context)) + sequence.add(_set_state_task(ctx, graph, node_id, 'deleted', '_{0}_deleted'.format(node_id), custom_context)) + #ctx.internal.send_workflow_event( + # event_type='other', + # message="return: _generate_native_node_sequence_uninstall(node: {0})".format(node)) + return sequence + + +class ForkjoinWrapper(object): + + def __init__(self, graph, name=""): + self.graph = graph + self.first_tasks = [] + self.last_tasks = [] + self.name = name + + def add(self, *tasks): + for element in tasks: + if isinstance(element, ForkjoinWrapper): + self.first_tasks.extend(element.first_tasks) + self.last_tasks.extend(element.last_tasks) + elif isinstance(element, TaskSequenceWrapper): + self.first_tasks.extend(element.first_tasks) + self.last_tasks.extend(element.last_tasks) + else: + self.first_tasks.append(element) + self.last_tasks.append(element) + self.graph.add_task(element) + + def is_not_empty(self): + return len(self.first_tasks) > 0 + + +class TaskSequenceWrapper(object): + + def __init__(self, graph, name=""): + self.graph = graph + self.first_tasks = None + self.last_tasks = None + self.name = name + + def set_head(self, task): + if self.first_tasks is None: + self.add(task) + else: + self.graph.add_task(task) + for next_task in self.first_tasks: + self.graph.add_dependency(next_task, task) + self.first_tasks = [task] + + def add(self, *tasks): + for element in tasks: + tasks_head = None + tasks_queue = None + if isinstance(element, ForkjoinWrapper): + tasks_head = element.first_tasks + tasks_queue = element.last_tasks + elif isinstance(element, TaskSequenceWrapper): + tasks_head = element.first_tasks + tasks_queue = element.last_tasks + else: + tasks_head = [element] + tasks_queue = tasks_head + self.graph.add_task(element) + for task in tasks_head: + if self.last_tasks is not None: + for last_task in self.last_tasks: + self.graph.add_dependency(task, last_task) + if tasks_head is not None: + if self.first_tasks is None: + self.first_tasks = tasks_head + if tasks_queue is not None: + self.last_tasks = tasks_queue + + +class CustomContext(object): + def __init__(self, ctx, modified_instances, modified_and_related_nodes): + # this set to store pre/post conf source/target operation that have been already called + # we'll use a string like sourceId#targetId#pre|post#source|target + self.executed_operation = set() + self.tasks = {} + self.relationship_targets = {} + # a set of nodeId for which wf is customized (designed using a4c) + self.customized_wf_nodes = set() + # a dict of nodeId -> stepId : nodes for which we need to manage the wf ourself + self.delegate_wf_steps = {} + # the modified nodes are those that have been modified (all in case of install or uninstall workflow, result of modification in case of scaling) + self.modified_instances_per_node = self.__get_instances_per_node(modified_instances) + self.modified_instance_ids = self.__get_instance_ids(modified_instances) + # contains the modifed nodes and the related nodes + self.modified_and_related_nodes = modified_and_related_nodes + self.__build_relationship_targets(ctx) + + ''' + Given an instance array, build a map where: + - key is node_id + - value is an instance array (all instances for this particular node_id) + ''' + def __get_instances_per_node(self, instances): + instances_per_node = {} + for instance in instances: + node_instances = instances_per_node.get(instance.node_id, 
None) + if node_instances is None: + node_instances = [] + instances_per_node[instance.node_id] = node_instances + node_instances.append(instance) + return instances_per_node + + def __get_instance_ids(self, instances): + instance_ids = set() + for instance in instances: + instance_ids.add(instance.id) + return instance_ids + + ''' + Build a map containing all the relationships that target a given node instance : + - key is target_id (a node instance id) + - value is a set of relationships (all relationship that target this node) + ''' + def __build_relationship_targets(self, ctx): + node_instances = _get_all_nodes_instances_from_nodes(self.modified_and_related_nodes) + for node_instance in node_instances: + ctx.internal.send_workflow_event( + event_type='other', + message="found an instance of {0} : {1}".format(node_instance.node_id, node_instance.id)) + for relationship in node_instance.relationships: + target_relationships = self.relationship_targets.get(relationship.target_id, None) + if target_relationships is None: + target_relationships = set() + self.relationship_targets[relationship.target_id] = target_relationships + ctx.internal.send_workflow_event( + event_type='other', + message="found a relationship that targets {0} from {1}".format(relationship.target_id, relationship.node_instance.id)) + target_relationships.add(relationship) + + def add_customized_wf_node(self, nodeId): + self.customized_wf_nodes.add(nodeId) + + def is_native_node(self, node_id): + if node_id in self.customized_wf_nodes: + return False + else: + return True + + # the native node are those for which workflow is not managed by a4c + def get_native_node_ids(self): + native_node_ids = set() + for node_id in self.modified_instances_per_node.keys(): + if node_id not in self.customized_wf_nodes: + native_node_ids.add(node_id) + return native_node_ids + + def register_native_delegate_wf_step(self, nodeId, stepId): + self.delegate_wf_steps[nodeId] = stepId + +#! /usr/bin/env python + +# These methods are used by test.py and waf to look for and read the +# .ns3rc configuration file, which is used to specify the modules that +# should be enabled + +import os +import sys + +def get_list_from_file(file_path, list_name): + '''Looks for a Python list called list_name in the file specified + by file_path and returns it. + + If the file or list name aren't found, this function will return + an empty list. + + ''' + + list = [] + + # Read in the file if it exists. + if os.path.exists(file_path): + file_in = open(file_path, "r") + + # Look for the list. + list_string = "" + parsing_multiline_list = False + for line in file_in: + + # Remove any comments. + if '#' in line: + (line, comment) = line.split('#', 1) + + # Parse the line. + if list_name in line or parsing_multiline_list: + list_string += line + + # Handle multiline lists. + if ']' not in list_string: + parsing_multiline_list = True + else: + # Evaluate the list once its end is reached. + # Make the split function only split it once. + list = eval(list_string.split('=', 1)[1].strip()) + break + + # Close the file + file_in.close() + + return list + + +def get_bool_from_file(file_path, bool_name, value_if_missing): + '''Looks for a Python boolean variable called bool_name in the + file specified by file_path and returns its value. + + If the file or boolean variable aren't found, this function will + return value_if_missing. + + ''' + + # Read in the file if it exists. 
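+ # For illustration: given the callers in read_config_file() below, a
+ # boolean line this parser would match looks roughly like
+ #     examples_enabled = True
+ # and everything right of the first '=' is eval()'d as a Python expression.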
+ if os.path.exists(file_path): + file_in = open(file_path, "r") + + # Look for the boolean variable. + bool_found = False + for line in file_in: + + # Remove any comments. + if '#' in line: + (line, comment) = line.split('#', 1) + + # Parse the line. + if bool_name in line: + # Evaluate the variable's line once it is found. Make + # the split function only split it once. + bool = eval(line.split('=', 1)[1].strip()) + bool_found = True + break + + # Close the file + file_in.close() + + if bool_found: + return bool + else: + return value_if_missing + + +# Reads the NS-3 configuration file and returns a list of enabled modules. +# +# This function first looks for the ns3 configuration file (.ns3rc) in +# the current working directory and then looks in the ~ directory. +def read_config_file(): + # By default, all modules will be enabled, examples will be disabled, + # and tests will be disabled. + modules_enabled = ['all_modules'] + examples_enabled = False + tests_enabled = False + + # See if the ns3 configuration file exists in the current working + # directory and then look for it in the ~ directory. + config_file_exists = False + dot_ns3rc_name = '.ns3rc' + dot_ns3rc_path = dot_ns3rc_name + if not os.path.exists(dot_ns3rc_path): + dot_ns3rc_path = os.path.expanduser('~/') + dot_ns3rc_name + if not os.path.exists(dot_ns3rc_path): + # Return all of the default values if the .ns3rc file can't be found. + return (config_file_exists, modules_enabled, examples_enabled, tests_enabled) + + config_file_exists = True + + # Read in the enabled modules. + modules_enabled = get_list_from_file(dot_ns3rc_path, 'modules_enabled') + if not modules_enabled: + # Enable all modules if the modules_enabled line can't be found. + modules_enabled = ['all_modules'] + + # Read in whether examples should be enabled or not. + value_if_missing = False + examples_enabled = get_bool_from_file(dot_ns3rc_path, 'examples_enabled', value_if_missing) + + # Read in whether tests should be enabled or not. + value_if_missing = False + tests_enabled = get_bool_from_file(dot_ns3rc_path, 'tests_enabled', value_if_missing) + + return (config_file_exists, modules_enabled, examples_enabled, tests_enabled) + + +#!/usr/bin/env python + +# Copyright (c) 2009 Google Inc. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +""" +Verify handling of build variants. + +TODO: Right now, only the SCons generator supports this, so the +test case is SCons-specific. In particular, it relise on SCons' +ability to rebuild in response to changes on the command line. It +may be simpler to just drop this feature if the other generators +can't be made to behave the same way. +""" + +import TestGyp + +test = TestGyp.TestGyp(formats=['scons']) + +test.run_gyp('variants.gyp', chdir='src') + +test.relocate('src', 'relocate/src') + +test.build('variants.gyp', chdir='relocate/src') + +test.run_built_executable('variants', + chdir='relocate/src', + stdout="Hello, world!\n") + +test.sleep() +test.build('variants.gyp', 'VARIANT1=1', chdir='relocate/src') + +test.run_built_executable('variants', + chdir='relocate/src', + stdout="Hello from VARIANT1\n") + +test.sleep() +test.build('variants.gyp', 'VARIANT2=1', chdir='relocate/src') + +test.run_built_executable('variants', + chdir='relocate/src', + stdout="Hello from VARIANT2\n") + +test.pass_test() + +# -*- coding: utf-8 -*- + +""" +Copyright (C) 2012 Dariusz Suchojad + +Licensed under LGPLv3, see LICENSE.txt for terms and conditions. 
+""" + +from __future__ import absolute_import, division, print_function, unicode_literals + +# anyjson +from anyjson import loads + +# nose +from nose.tools import eq_ + +# Bunch +from bunch import Bunch + +# Zato +from zato.common.odb.model import Service +from zato.common.test import Expected, rand_bool, rand_int, rand_string, ServiceTestCase +from zato.server.service.internal.service import GetList, GetByName + +def get_data(): + return Bunch({'id':rand_int(), 'name':rand_string(), 'is_active':rand_bool(), + 'impl_name':rand_string(), 'is_internal':rand_bool()}) + +class GetListTestCase(ServiceTestCase): + def test_response(self): + request = {'cluster_id': rand_int()} + + expected_keys = get_data().keys() + expected_data = tuple(get_data() for x in range(rand_int(10))) + expected = Expected() + + for datum in expected_data: + item = Service() + for key in expected_keys: + value = getattr(datum, key) + setattr(item, key, value) + expected.add(item) + + instance = self.invoke(GetList, request, expected) + response = loads(instance.response.payload.getvalue())[GetList.SimpleIO.response_elem] + + for idx, item in enumerate(response): + expected = expected_data[idx] + given = Bunch(item) + + for key in expected_keys: + given_value = getattr(given, key) + expected_value = getattr(expected, key) + eq_(given_value, expected_value) + +class GetByNameTestCase(ServiceTestCase): + def test_response(self): + request = {'cluster_id':rand_int(), 'name':rand_string()} + + expected_id = rand_int() + expected_name = rand_string() + expected_is_active = rand_bool() + expected_impl_name = rand_string() + expected_is_internal = rand_bool() + + service = Service() + service.id = expected_id + service.name = expected_name + service.is_active = expected_is_active + service.impl_name = expected_impl_name + service.is_internal = expected_is_internal + + expected = Expected() + expected.add(service) + + instance = self.invoke(GetByName, request, expected) + response = Bunch(loads(instance.response.payload.getvalue())['zato_service_get_by_name_response']) + + eq_(response.id, expected_id) + eq_(response.name, expected_name) + eq_(response.is_active, expected_is_active) + eq_(response.impl_name, expected_impl_name) + eq_(response.is_internal, expected_is_internal) + eq_(response.usage, 0) + +#!/usr/bin/env python +# Copyright (c) 2012 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. 
+ +import cStringIO +import logging +import unittest +import sys + +import isolate_common + + +class TraceInputs(unittest.TestCase): + def _test(self, value, expected): + actual = cStringIO.StringIO() + isolate_common.pretty_print(value, actual) + self.assertEquals(expected, actual.getvalue()) + + def test_pretty_print_empty(self): + self._test({}, '{\n}\n') + + def test_pretty_print_mid_size(self): + value = { + 'variables': { + 'bar': [ + 'file1', + 'file2', + ], + }, + 'conditions': [ + ['OS=\"foo\"', { + 'variables': { + isolate_common.KEY_UNTRACKED: [ + 'dir1', + 'dir2', + ], + isolate_common.KEY_TRACKED: [ + 'file4', + 'file3', + ], + 'command': ['python', '-c', 'print "H\\i\'"'], + 'read_only': True, + 'relative_cwd': 'isol\'at\\e', + }, + }], + ['OS=\"bar\"', { + 'variables': {}, + }, { + 'variables': {}, + }], + ], + } + expected = ( + "{\n" + " 'variables': {\n" + " 'bar': [\n" + " 'file1',\n" + " 'file2',\n" + " ],\n" + " },\n" + " 'conditions': [\n" + " ['OS=\"foo\"', {\n" + " 'variables': {\n" + " 'command': [\n" + " 'python',\n" + " '-c',\n" + " 'print \"H\\i\'\"',\n" + " ],\n" + " 'relative_cwd': 'isol\\'at\\\\e',\n" + " 'read_only': True\n" + " 'isolate_dependency_tracked': [\n" + " 'file4',\n" + " 'file3',\n" + " ],\n" + " 'isolate_dependency_untracked': [\n" + " 'dir1',\n" + " 'dir2',\n" + " ],\n" + " },\n" + " }],\n" + " ['OS=\"bar\"', {\n" + " 'variables': {\n" + " },\n" + " }, {\n" + " 'variables': {\n" + " },\n" + " }],\n" + " ],\n" + "}\n") + self._test(value, expected) + + +if __name__ == '__main__': + VERBOSE = '-v' in sys.argv + logging.basicConfig(level=logging.DEBUG if VERBOSE else logging.ERROR) + unittest.main() + +import tempfile + +class HxmlWriter: + def __init__(self): + self.file_out = open(tempfile.gettempdir()+'/backplot.xml', 'w') + self.file_out.write('\n') + self.file_out.write('\n') + self.t = None + self.oldx = None + self.oldy = None + self.oldz = None + + def __del__(self): + self.file_out.write('\n') + self.file_out.close() + + def write(self, s): + self.file_out.write(s) + +############################################ + + def begin_ncblock(self): + self.file_out.write('\t\n') + + def end_ncblock(self): + self.file_out.write('\t\n') + + def add_text(self, s, col, cdata): + s.replace('&', '&') + s.replace('"', '"') + s.replace('<', '<') + s.replace('>', '>') + if (cdata) : (cd1, cd2) = ('') + else : (cd1, cd2) = ('', '') + if (col != None) : self.file_out.write('\t\t'+cd1+s+cd2+'\n') + else : self.file_out.write('\t\t'+cd1+s+cd2+'\n') + + def set_mode(self, units): + self.file_out.write('\t\t\n') + + def metric(self): + self.set_mode(units = 1.0) + + def imperial(self): + self.set_mode(units = 25.4) + + def begin_path(self, col): + if (col != None) : self.file_out.write('\t\t\n') + else : self.file_out.write('\t\t\n') + + def end_path(self): + self.file_out.write('\t\t\n') + + def rapid(self, x=None, y=None, z=None, a=None, b=None, c=None): + self.begin_path("rapid") + self.add_line(x, y, z, a, b, c) + self.end_path() + + def feed(self, x=None, y=None, z=None, a=None, b=None, c=None): + self.begin_path("feed") + self.add_line(x, y, z, a, b, c) + self.end_path() + + def arc_cw(self, x=None, y=None, z=None, i=None, j=None, k=None, r=None): + self.begin_path("feed") + self.add_arc(x, y, z, i, j, k, r, -1) + self.end_path() + + def arc_ccw(self, x=None, y=None, z=None, i=None, j=None, k=None, r=None): + self.begin_path("feed") + self.add_arc(x, y, z, i, j, k, r, 1) + self.end_path() + + def tool_change(self, id): + self.file_out.write('\t\t\n') + 
self.t = id + + def current_tool(self): + return self.t + + def spindle(self, s, clockwise): + pass + + def feedrate(self, f): + pass + + def add_line(self, x, y, z, a = None, b = None, c = None): + self.file_out.write('\t\t\t\n') + if x != None: self.oldx = x + if y != None: self.oldy = y + if z != None: self.oldz = z + + def add_arc(self, x, y, z, i, j, k, r = None, d = None): + self.file_out.write('\t\t\t\n') + if x != None: self.oldx = x + if y != None: self.oldy = y + if z != None: self.oldz = z + +from datacash import models, facade, gateway +from oscar.apps.order import processing +from oscar.apps.payment import exceptions + +from .models import PaymentEventType + + +class EventHandler(processing.EventHandler): + + def handle_shipping_event(self, order, event_type, lines, + line_quantities, **kwargs): + self.validate_shipping_event( + order, event_type, lines, line_quantities, **kwargs) + + payment_event = None + if event_type.name == 'Shipped': + # Take payment for order lines + self.take_payment_for_lines( + order, lines, line_quantities) + self.consume_stock_allocations( + order, lines, line_quantities) + + shipping_event = self.create_shipping_event( + order, event_type, lines, line_quantities, + reference=kwargs.get('reference', None)) + if payment_event: + shipping_event.payment_events.add(payment_event) + + def take_payment_for_lines(self, order, lines, line_quantities): + settle, __ = PaymentEventType.objects.get_or_create( + name="Settle") + amount = self.calculate_amount_to_settle( + settle, order, lines, line_quantities) + # Take payment with Datacash (using pre-auth txn) + txn = self.get_datacash_preauth(order) + + f = facade.Facade() + try: + datacash_ref = f.fulfill_transaction( + order.number, amount, txn.datacash_reference, + txn.auth_code) + except exceptions.PaymentError as e: + self.create_note(order, "Attempt to settle %.2f failed: %s" % ( + amount, e)) + raise + + # Record message + msg = "Payment of %.2f settled using reference '%s' from initial transaction" + msg = msg % (amount, txn.datacash_reference) + self.create_note(order, msg) + + # Update order source + source = order.sources.get(source_type__name='Datacash') + source.debit(amount, reference=datacash_ref) + + # Create payment event + return self.create_payment_event( + order, settle, amount, lines, line_quantities, + reference=datacash_ref) + + def calculate_amount_to_settle( + self, event_type, order, lines, line_quantities): + amt = self.calculate_payment_event_subtotal( + event_type, lines, line_quantities) + num_payments = order.payment_events.filter( + event_type=event_type).count() + if num_payments == 0: + # Include shipping charge in first payment + amt += order.shipping_incl_tax + return amt + + def get_datacash_preauth(self, order): + """ + Return the (successful) pre-auth Datacash transaction for + the passed order. + """ + transactions = models.OrderTransaction.objects.filter( + order_number=order.number, method=gateway.PRE, status=1) + if transactions.count() == 0: + raise exceptions.PaymentError( + "Unable to take payment as no PRE-AUTH " + "transaction found for this order") + return transactions[0] + +# -*- coding: utf-8 -*- +"""`sphinx_rtd_theme` lives on `Github`_. + +.. 
_github: https://www.github.com/snide/sphinx_rtd_theme + +""" +from setuptools import setup +from sphinx_rtd_theme import __version__ + + +setup( + name='sphinx_rtd_theme', + version=__version__, + url='https://github.com/snide/sphinx_rtd_theme/', + license='MIT', + author='Dave Snider', + author_email='dave.snider@gmail.com', + description='ReadTheDocs.org theme for Sphinx, 2013 version.', + long_description=open('README.rst').read(), + zip_safe=False, + packages=['sphinx_rtd_theme'], + package_data={'sphinx_rtd_theme': [ + 'theme.conf', + '*.html', + 'static/css/*.css', + 'static/js/*.js', + 'static/font/*.*' + ]}, + include_package_data=True, + install_requires=open('requirements.txt').read().splitlines(), + classifiers=[ + 'Development Status :: 3 - Alpha', + 'License :: OSI Approved :: BSD License', + 'Environment :: Console', + 'Environment :: Web Environment', + 'Intended Audience :: Developers', + 'Programming Language :: Python :: 2.7', + 'Programming Language :: Python :: 3', + 'Operating System :: OS Independent', + 'Topic :: Documentation', + 'Topic :: Software Development :: Documentation', + ], +) + +import sys +import unittest + +__all__ = ['get_config_vars', 'get_path'] + +try: + # Python 2.7 or >=3.2 + from sysconfig import get_config_vars, get_path +except ImportError: + from distutils.sysconfig import get_config_vars, get_python_lib + def get_path(name): + if name not in ('platlib', 'purelib'): + raise ValueError("Name must be purelib or platlib") + return get_python_lib(name=='platlib') + +try: + # Python >=3.2 + from tempfile import TemporaryDirectory +except ImportError: + import shutil + import tempfile + class TemporaryDirectory(object): + """" + Very simple temporary directory context manager. + Will try to delete afterward, but will also ignore OS and similar + errors on deletion. + """ + def __init__(self): + self.name = None # Handle mkdtemp raising an exception + self.name = tempfile.mkdtemp() + + def __enter__(self): + return self.name + + def __exit__(self, exctype, excvalue, exctrace): + try: + shutil.rmtree(self.name, True) + except OSError: #removal errors are not the only possible + pass + self.name = None + + +unittest_main = unittest.main + +_PY31 = (3, 1) <= sys.version_info[:2] < (3, 2) +if _PY31: + # on Python 3.1, translate testRunner==None to TextTestRunner + # for compatibility with Python 2.6, 2.7, and 3.2+ + def unittest_main(*args, **kwargs): + if 'testRunner' in kwargs and kwargs['testRunner'] is None: + kwargs['testRunner'] = unittest.TextTestRunner + return unittest.main(*args, **kwargs) + +# -*- coding: UTF-8 -*- +## Copyright 2012 Luc Saffre +## This file is part of the Lino project. +## Lino is free software; you can redistribute it and/or modify +## it under the terms of the GNU General Public License as published by +## the Free Software Foundation; either version 3 of the License, or +## (at your option) any later version. +## Lino is distributed in the hope that it will be useful, +## but WITHOUT ANY WARRANTY; without even the implied warranty of +## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +## GNU General Public License for more details. +## You should have received a copy of the GNU General Public License +## along with Lino; if not, see . + +r""" +Experimental. Not maintained and not used within Lino. + +This package contains mainly a copy of `odfpy.contrib.html2odt` +(https://joinup.ec.europa.eu/software/odfpy). 
+One modification by LS in the original files: + +- :file:`html2odt.py` : changed import statement for `emptycontent` + +The content of this file (:file:`__init__.py`) is my own derived work. + +I wanted to use the HTML2ODTParser not for grabbing +a complete HTML page and creating a full .odt file, +but for converting a chunk of HTML into a chunk of ODF XML. + +Example: + +>>> html = '''This is
a simple test.''' +>>> print html2odt(html) +This isa simple test. + +Note that the Parser ignores the ``...`` tag. +Seems that this simply isn't yet implemented. + +""" + +from lino.utils.html2odt.html2odt import HTML2ODTParser +from odf import element + + +class RawXML(element.Childless, element.Node): + #~ nodeType = element.Node.ELEMENT_NODE + nodeType = element.Node.TEXT_NODE + def __init__(self, raw_xml): + self.raw_xml = raw_xml + #~ super(RawXML,self).__init__() + + def toXml(self,level,f): + f.write(self.raw_xml) + + + +def html2odt(data,encoding='iso8859-1',baseurl=''): + p = HTML2ODTParser(encoding, baseurl) + #~ failure = "" + p.feed(data) + text = p.result() # Flush the buffer + #~ return RawXML(text) + return text + + +def _test(): + import doctest + doctest.testmod() + +if __name__ == "__main__": + _test() + + +# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +from boto.s3.user import User + + +class ResultSet(list): + """ + The ResultSet is used to pass results back from the Amazon services + to the client. It is light wrapper around Python's :py:class:`list` class, + with some additional methods for parsing XML results from AWS. + Because I don't really want any dependencies on external libraries, + I'm using the standard SAX parser that comes with Python. The good news is + that it's quite fast and efficient but it makes some things rather + difficult. + + You can pass in, as the marker_elem parameter, a list of tuples. + Each tuple contains a string as the first element which represents + the XML element that the resultset needs to be on the lookout for + and a Python class as the second element of the tuple. Each time the + specified element is found in the XML, a new instance of the class + will be created and popped onto the stack. + + :ivar str next_token: A hash used to assist in paging through very long + result sets. In most cases, passing this value to certain methods + will give you another 'page' of results. 
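+
+ For illustration (``Bucket`` here is a placeholder class name, not something
+ this module defines): passing ``marker_elem=[('Bucket', Bucket)]`` makes
+ ``startElement`` create a ``Bucket(connection)`` instance and append it to
+ this list each time a ``Bucket`` element is encountered in the XML.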
+ """ + def __init__(self, marker_elem=None): + list.__init__(self) + if isinstance(marker_elem, list): + self.markers = marker_elem + else: + self.markers = [] + self.marker = None + self.key_marker = None + self.next_marker = None # avail when delimiter used + self.next_key_marker = None + self.next_upload_id_marker = None + self.next_version_id_marker = None + self.next_generation_marker = None + self.version_id_marker = None + self.is_truncated = False + self.next_token = None + self.status = True + + def startElement(self, name, attrs, connection): + for t in self.markers: + if name == t[0]: + obj = t[1](connection) + self.append(obj) + return obj + if name == 'Owner': + # Makes owner available for get_service and + # perhaps other lists where not handled by + # another element. + self.owner = User() + return self.owner + return None + + def to_boolean(self, value, true_value='true'): + if value == true_value: + return True + else: + return False + + def endElement(self, name, value, connection): + if name == 'IsTruncated': + self.is_truncated = self.to_boolean(value) + elif name == 'Marker': + self.marker = value + elif name == 'KeyMarker': + self.key_marker = value + elif name == 'NextMarker': + self.next_marker = value + elif name == 'NextKeyMarker': + self.next_key_marker = value + elif name == 'VersionIdMarker': + self.version_id_marker = value + elif name == 'NextVersionIdMarker': + self.next_version_id_marker = value + elif name == 'NextGenerationMarker': + self.next_generation_marker = value + elif name == 'UploadIdMarker': + self.upload_id_marker = value + elif name == 'NextUploadIdMarker': + self.next_upload_id_marker = value + elif name == 'Bucket': + self.bucket = value + elif name == 'MaxUploads': + self.max_uploads = int(value) + elif name == 'MaxItems': + self.max_items = int(value) + elif name == 'Prefix': + self.prefix = value + elif name == 'return': + self.status = self.to_boolean(value) + elif name == 'StatusCode': + self.status = self.to_boolean(value, 'Success') + elif name == 'ItemName': + self.append(value) + elif name == 'NextToken': + self.next_token = value + elif name == 'nextToken': + self.next_token = value + # Code exists which expects nextToken to be available, so we + # set it here to remain backwards-compatibile. 
+ self.nextToken = value + elif name == 'BoxUsage': + try: + connection.box_usage += float(value) + except: + pass + elif name == 'IsValid': + self.status = self.to_boolean(value, 'True') + else: + setattr(self, name, value) + + +class BooleanResult(object): + + def __init__(self, marker_elem=None): + self.status = True + self.request_id = None + self.box_usage = None + + def __repr__(self): + if self.status: + return 'True' + else: + return 'False' + + def __nonzero__(self): + return self.status + + def startElement(self, name, attrs, connection): + return None + + def to_boolean(self, value, true_value='true'): + if value == true_value: + return True + else: + return False + + def endElement(self, name, value, connection): + if name == 'return': + self.status = self.to_boolean(value) + elif name == 'StatusCode': + self.status = self.to_boolean(value, 'Success') + elif name == 'IsValid': + self.status = self.to_boolean(value, 'True') + elif name == 'RequestId': + self.request_id = value + elif name == 'requestId': + self.request_id = value + elif name == 'BoxUsage': + self.request_id = value + else: + setattr(self, name, value) + +import binascii +import struct + +from django.forms import ValidationError + +from .const import ( + GDAL_TO_POSTGIS, GDAL_TO_STRUCT, POSTGIS_HEADER_STRUCTURE, POSTGIS_TO_GDAL, + STRUCT_SIZE, +) + + +def pack(structure, data): + """ + Pack data into hex string with little endian format. + """ + return binascii.hexlify(struct.pack('<' + structure, *data)).upper() + + +def unpack(structure, data): + """ + Unpack little endian hexlified binary string into a list. + """ + return struct.unpack('<' + structure, binascii.unhexlify(data)) + + +def chunk(data, index): + """ + Split a string into two parts at the input index. + """ + return data[:index], data[index:] + + +def get_pgraster_srid(data): + """ + Extract the SRID from a PostGIS raster string. + """ + if data is None: + return + # The positional arguments here extract the hex-encoded srid from the + # header of the PostGIS raster string. This can be understood through + # the POSTGIS_HEADER_STRUCTURE constant definition in the const module. + return unpack('i', data[106:114])[0] + + +def from_pgraster(data): + """ + Convert a PostGIS HEX String into a dictionary. + """ + if data is None: + return + + # Split raster header from data + header, data = chunk(data, 122) + header = unpack(POSTGIS_HEADER_STRUCTURE, header) + + # Parse band data + bands = [] + pixeltypes = [] + while data: + # Get pixel type for this band + pixeltype, data = chunk(data, 2) + pixeltype = unpack('B', pixeltype)[0] + + # Subtract nodata byte from band nodata value if it exists + has_nodata = pixeltype >= 64 + if has_nodata: + pixeltype -= 64 + + # Convert datatype from PostGIS to GDAL & get pack type and size + pixeltype = POSTGIS_TO_GDAL[pixeltype] + pack_type = GDAL_TO_STRUCT[pixeltype] + pack_size = 2 * STRUCT_SIZE[pack_type] + + # Parse band nodata value. The nodata value is part of the + # PGRaster string even if the nodata flag is True, so it always + # has to be chunked off the data string. + nodata, data = chunk(data, pack_size) + nodata = unpack(pack_type, nodata)[0] + + # Chunk and unpack band data (pack size times nr of pixels) + band, data = chunk(data, pack_size * header[10] * header[11]) + band_result = {'data': binascii.unhexlify(band)} + + # If the nodata flag is True, set the nodata value. 
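+ # For example (per the to_pgraster comments below): a 32BSI band with
+ # a nodata value stores 7 + 64 = 71 in its pixeltype byte, which the
+ # code above reduces back to 7 after flagging has_nodata.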
+ if has_nodata: + band_result['nodata_value'] = nodata + + # Append band data to band list + bands.append(band_result) + + # Store pixeltype of this band in pixeltypes array + pixeltypes.append(pixeltype) + + # Check that all bands have the same pixeltype. + # This is required by GDAL. PostGIS rasters could have different pixeltypes + # for bands of the same raster. + if len(set(pixeltypes)) != 1: + raise ValidationError("Band pixeltypes are not all equal.") + + return { + 'srid': int(header[9]), + 'width': header[10], 'height': header[11], + 'datatype': pixeltypes[0], + 'origin': (header[5], header[6]), + 'scale': (header[3], header[4]), + 'skew': (header[7], header[8]), + 'bands': bands, + } + + +def to_pgraster(rast): + """ + Convert a GDALRaster into PostGIS Raster format. + """ + # Return if the raster is null + if rast is None or rast == '': + return + + # Prepare the raster header data as a tuple. The first two numbers are + # the endianness and the PostGIS Raster Version, both are fixed by + # PostGIS at the moment. + rasterheader = ( + 1, 0, len(rast.bands), rast.scale.x, rast.scale.y, + rast.origin.x, rast.origin.y, rast.skew.x, rast.skew.y, + rast.srs.srid, rast.width, rast.height, + ) + + # Hexlify raster header + result = pack(POSTGIS_HEADER_STRUCTURE, rasterheader) + + for band in rast.bands: + # The PostGIS raster band header has exactly two elements, a 8BUI byte + # and the nodata value. + # + # The 8BUI stores both the PostGIS pixel data type and a nodata flag. + # It is composed as the datatype integer plus 64 as a flag for existing + # nodata values: + # 8BUI_VALUE = PG_PIXEL_TYPE (0-11) + FLAG (0 or 64) + # + # For example, if the byte value is 71, then the datatype is + # 71-64 = 7 (32BSI) and the nodata value is True. + structure = 'B' + GDAL_TO_STRUCT[band.datatype()] + + # Get band pixel type in PostGIS notation + pixeltype = GDAL_TO_POSTGIS[band.datatype()] + + # Set the nodata flag + if band.nodata_value is not None: + pixeltype += 64 + + # Pack band header + bandheader = pack(structure, (pixeltype, band.nodata_value or 0)) + + # Hexlify band data + band_data_hex = binascii.hexlify(band.data(as_memoryview=True)).upper() + + # Add packed header and band data to result + result += bandheader + band_data_hex + + # Cast raster to string before passing it to the DB + return result.decode() + +from django.shortcuts import render_to_response, get_object_or_404 +from django.http import HttpResponseRedirect, Http404 +from django.template import RequestContext +from django.core.urlresolvers import reverse +from django.utils.translation import ugettext_lazy as _ +from django.contrib.auth.models import User +from django.contrib.auth.decorators import login_required +from django.views.generic import date_based +from django.conf import settings +from django.db.models import Q + +from swaps.models import Offer, Swap +from swaps.forms import OfferForm, ProposeSwapForm, ProposingOfferForm + +try: + from notification import models as notification +except ImportError: + notification = None + +try: + from threadedcomments.models import ThreadedComment + forums = True +except ImportError: + forums = False + +@login_required +def offers(request, username=None): + offers = Offer.objects.filter(state=1).order_by("-offered_time") + return render_to_response("swaps/offers.html", {"offers": offers}, context_instance=RequestContext(request)) + +@login_required +def offer(request, offer_id): + offer = get_object_or_404(Offer, id=offer_id) + #deletable = offer.is_deletable() + return 
render_to_response("swaps/offer.html", { + "offer": offer, + #"deletable": deletable, + }, context_instance=RequestContext(request)) + +@login_required +def your_offers(request): + user = request.user + offers = Offer.objects.filter(offerer=user).order_by("-offered_time") + return render_to_response("swaps/your_offers.html", {"offers": offers}, context_instance=RequestContext(request)) + +@login_required +def swap(request, swap_id): + swap = get_object_or_404(Swap, id=swap_id) + return render_to_response("swaps/swap.html", { + "swap": swap, + }, context_instance=RequestContext(request)) + +@login_required +def proposed_by_you(request): + swaps = Swap.objects.filter(proposing_offer__offerer=request.user, state=1).order_by("-proposed_time") + return render_to_response("swaps/proposed_by_you.html", {"swaps": swaps}, context_instance=RequestContext(request)) + +@login_required +def proposed_to_you(request): + swaps = Swap.objects.filter(responding_offer__offerer=request.user, state=1).order_by("-proposed_time") + return render_to_response("swaps/proposed_to_you.html", {"swaps": swaps}, context_instance=RequestContext(request)) + +@login_required +def accepted_swaps(request): + swaps = Swap.objects.filter( + Q(state=2, proposing_offer__offerer=request.user) | + Q(state=2, responding_offer__offerer=request.user)).order_by("-accepted_time") + return render_to_response("swaps/accepted.html", {"swaps": swaps}, context_instance=RequestContext(request)) + +@login_required +def dead_swaps(request): + swaps = Swap.objects.filter( + Q(state__gt=3, proposing_offer__offerer=request.user) | + Q(state__gt=3, responding_offer__offerer=request.user)).order_by("-killed_time") + return render_to_response("swaps/dead.html", {"swaps": swaps}, context_instance=RequestContext(request)) + + +@login_required +def new(request): + if request.method == "POST": + if request.POST["action"] == "create": + offer_form = OfferForm(request.POST) + if offer_form.is_valid(): + offer = offer_form.save(commit=False) + offer.offerer = request.user + offer.save() + request.user.message_set.create(message=_("Successfully saved offer '%s'") % offer.short_description) + #if notification: + # if friends: # @@@ might be worth having a shortcut for sending to all friends + # notification.send((x['friend'] for x in Friendship.objects.friends_for_user(offer.offerer)), "offer_friend_post", {"post": blog}) + + return HttpResponseRedirect(reverse("offer_list_yours")) + else: + offer_form = OfferForm() + else: + offer_form = OfferForm() + + return render_to_response("swaps/new_offer.html", { + "offer_form": offer_form + }, context_instance=RequestContext(request)) + + +@login_required +def edit_offer(request, offer_id): + offer = get_object_or_404(Offer, id=offer_id) + if offer.offerer != request.user: + request.user.message_set.create(message="You cannot edit offers that are not yours") + return HttpResponseRedirect(reverse("offer_list_yours")) + return_to = request.GET['returnto'] + if request.method == "POST": + if request.POST["action"] == "update": + offer_form = OfferForm(request.POST, instance=offer) + if offer_form.is_valid(): + offer = offer_form.save(commit=False) + offer.save() + if notification: + for swap in offer.proposed_swaps.filter(state=1): + notification.send([swap.responding_offer.offerer,], "swaps_proposing_offer_changed", + {"creator": request.user, + "swap": swap, + "proposing_offer": swap.proposing_offer, + "responding_offer": swap.responding_offer}) + for swap in offer.responding_swaps.filter(state=1): + 
notification.send([swap.proposing_offer.offerer,], "swaps_responding_offer_changed", + {"creator": request.user, + "swap": swap, + "proposing_offer": swap.proposing_offer, + "responding_offer": swap.responding_offer}) + + request.user.message_set.create(message=_("Successfully updated offer '%s'") % offer.short_description) + return HttpResponseRedirect(reverse(return_to)) + else: + offer_form = OfferForm(instance=offer) + else: + offer_form = OfferForm(instance=offer) + + return render_to_response("swaps/edit_offer.html", { + "offer_form": offer_form, + "offer": offer, + }, context_instance=RequestContext(request)) + +@login_required +def delete_offer(request, offer_id): + offer = get_object_or_404(Offer, id=offer_id) + if offer.offerer != request.user: + request.user.message_set.create(message="You cannot delete offers that are not yours") + return HttpResponseRedirect(reverse("offer_list_yours")) + if request.method == "POST": + offer.delete() + return HttpResponseRedirect(reverse("offer_list_yours")) + +@login_required +def cancel_offer(request, offer_id): + offer = get_object_or_404(Offer, id=offer_id) + if offer.offerer != request.user: + request.user.message_set.create(message="You cannot cancel offers that are not yours") + return HttpResponseRedirect(reverse("offer_list_yours")) + if request.method == "POST": + offer.cancel() + return HttpResponseRedirect(reverse("offer_list_yours")) + +@login_required +def propose_swap(request, offer_id): + offer = get_object_or_404(Offer, id=offer_id) + if request.method == "POST": + swap_form = ProposeSwapForm(request.POST) + offer_form = ProposingOfferForm(request.POST) + swap = None + if swap_form.is_valid(): + swap = swap_form.save(commit=False) + swap.responding_offer = offer + swap.save() + if offer_form.is_valid(): + proposing_offer = offer_form.save(commit=False) + proposing_offer.offerer = request.user + proposing_offer.save() + swap = Swap( + proposing_offer=proposing_offer, + responding_offer=offer) + swap.save() + if swap: + if notification: + notification.send([offer.offerer,], "swaps_proposal", + {"creator": request.user, + "swap": swap, + "proposing_offer": swap.proposing_offer, + "responding_offer": swap.responding_offer}) + return HttpResponseRedirect(reverse("proposed_by_you")) + else: + swap_form = ProposeSwapForm() + swap_form.fields['proposing_offer'].queryset = Offer.objects.filter(offerer=request.user, state=1) + offer_form = ProposingOfferForm() + return render_to_response("swaps/propose_swap.html", { + "offer": offer, + "swap_form": swap_form, + "offer_form": offer_form, + }, context_instance=RequestContext(request)) + +@login_required +def accept_swap(request, swap_id): + swap = get_object_or_404(Swap, id=swap_id) + swap.accept() + swap.save() + if notification: + notification.send([swap.proposing_offer.offerer,], "swaps_acceptance", + {"creator": request.user, + "swap": swap, + "proposing_offer": swap.proposing_offer, + "responding_offer": swap.responding_offer}) + return HttpResponseRedirect(reverse("accepted_swaps")) + +@login_required +def reject_swap(request, swap_id): + swap = get_object_or_404(Swap, id=swap_id) + swap.reject() + swap.save() + if notification: + notification.send([swap.proposing_offer.offerer,], "swaps_rejection", + {"creator": request.user, + "swap": swap, + "proposing_offer": swap.proposing_offer, + "responding_offer": swap.responding_offer}) + return HttpResponseRedirect(reverse("dead_swaps")) + +@login_required +def cancel_swap(request, swap_id): + swap = get_object_or_404(Swap, id=swap_id) + 
swap.cancel() + swap.save() + if notification: + notification.send([swap.responding_offer.offerer,], "swaps_cancellation", + {"creator": request.user, + "swap": swap, + "proposing_offer": swap.proposing_offer, + "responding_offer": swap.responding_offer}) + return HttpResponseRedirect(reverse("dead_swaps")) + + +"""Regression test module for DependencyGraph""" +from __future__ import print_function + +from future.utils import viewitems + +from miasm.expression.expression import ExprId, ExprInt, ExprAssign, \ + ExprCond, ExprLoc, LocKey +from miasm.core.locationdb import LocationDB +from miasm.ir.analysis import LifterModelCall +from miasm.ir.ir import IRBlock, AssignBlock +from miasm.core.graph import DiGraph +from miasm.analysis.depgraph import DependencyNode, DependencyGraph +from itertools import count +from pdb import pm +import re + +loc_db = LocationDB() + +EMULATION = True +try: + import z3 +except ImportError: + EMULATION = False + +STEP_COUNTER = count() +A = ExprId("a", 32) +B = ExprId("b", 32) +C = ExprId("c", 32) +D = ExprId("d", 32) +R = ExprId("r", 32) +COND = ExprId("cond", 32) + +A_INIT = ExprId("a_init", 32) +B_INIT = ExprId("b_init", 32) +C_INIT = ExprId("c_init", 32) +D_INIT = ExprId("d_init", 32) + +PC = ExprId("pc", 32) +SP = ExprId("sp", 32) + +CST0 = ExprInt(0x0, 32) +CST1 = ExprInt(0x1, 32) +CST2 = ExprInt(0x2, 32) +CST3 = ExprInt(0x3, 32) +CST22 = ExprInt(0x22, 32) +CST23 = ExprInt(0x23, 32) +CST24 = ExprInt(0x24, 32) +CST33 = ExprInt(0x33, 32) +CST35 = ExprInt(0x35, 32) +CST37 = ExprInt(0x37, 32) + +LBL0 = loc_db.add_location("lbl0", 0) +LBL1 = loc_db.add_location("lbl1", 1) +LBL2 = loc_db.add_location("lbl2", 2) +LBL3 = loc_db.add_location("lbl3", 3) +LBL4 = loc_db.add_location("lbl4", 4) +LBL5 = loc_db.add_location("lbl5", 5) +LBL6 = loc_db.add_location("lbl6", 6) + +def gen_irblock(label, exprs_list): + """ Returns an IRBlock. 
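+ Each inner list of exprs_list becomes one AssignBlock; for example,
+ gen_irblock(LBL0, [[ExprAssign(C, CST1), ExprAssign(IRDst, ExprLoc(LBL1, 32))]])
+ (as used for graph 1 below) builds a block with a single AssignBlock.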
+ Used only for tests purpose + """ + irs = [] + for exprs in exprs_list: + if isinstance(exprs, AssignBlock): + irs.append(exprs) + else: + irs.append(AssignBlock(exprs)) + + irbl = IRBlock(loc_db, label, irs) + return irbl + + +class Regs(object): + + """Fake registers for tests """ + regs_init = {A: A_INIT, B: B_INIT, C: C_INIT, D: D_INIT} + all_regs_ids = [A, B, C, D, SP, PC, R] + + +class Arch(object): + + """Fake architecture for tests """ + regs = Regs() + + def getpc(self, attrib): + return PC + + def getsp(self, attrib): + return SP + + +class IRATest(LifterModelCall): + + """Fake IRA class for tests""" + + def __init__(self, loc_db): + arch = Arch() + super(IRATest, self).__init__(arch, 32, loc_db) + self.IRDst = ExprId("IRDst", 32) + self.ret_reg = R + + def get_out_regs(self, _): + return set([self.ret_reg, self.sp]) + + +def bloc2graph(irgraph, label=False, lines=True): + """Render dot graph of @blocks""" + + escape_chars = re.compile('[' + re.escape('{}') + ']') + label_attr = 'colspan="2" align="center" bgcolor="grey"' + edge_attr = 'label = "%s" color="%s" style="bold"' + td_attr = 'align="left"' + block_attr = 'shape="Mrecord" fontname="Courier New"' + + out = ["digraph asm_graph {"] + fix_chars = lambda x: '\\' + x.group() + + # Generate basic blocks + out_blocks = [] + for label in irgraph.nodes(): + assert isinstance(label, LocKey) + label_names = irgraph.loc_db.get_location_names(label) + label_name = list(label_names)[0] + + if hasattr(irgraph, 'blocks'): + irblock = irgraph.blocks[label] + else: + irblock = None + if isinstance(label, LocKey): + out_block = '%s [\n' % label_name + else: + out_block = '%s [\n' % label + out_block += "%s " % block_attr + out_block += 'label =<' + + block_label = '' % ( + label_attr, label_name) + block_html_lines = [] + if lines and irblock is not None: + for assignblk in irblock: + for dst, src in viewitems(assignblk): + if False: + out_render = "%.8X') + out_block += "%s " % block_label + out_block += block_html_lines + "
%s
" % (0, td_attr) + else: + out_render = "" + out_render += escape_chars.sub(fix_chars, "%s = %s" % (dst, src)) + block_html_lines.append(out_render) + block_html_lines.append(" ") + block_html_lines.pop() + block_html_lines = ('
' % td_attr + + ('
' % td_attr).join(block_html_lines) + + '
> ];" + out_blocks.append(out_block) + + out += out_blocks + # Generate links + for src, dst in irgraph.edges(): + assert isinstance(src, LocKey) + src_names = irgraph.loc_db.get_location_names(src) + assert isinstance(dst, LocKey) + dst_names = irgraph.loc_db.get_location_names(dst) + + src_name = list(src_names)[0] + dst_name = list(dst_names)[0] + + edge_color = "black" + out.append('%s -> %s' % (src_name, + dst_name) + + '[' + edge_attr % ("", edge_color) + '];') + + out.append("}") + return '\n'.join(out) + + +def dg2graph(graph, label=False, lines=True): + """Render dot graph of @blocks""" + + escape_chars = re.compile('[' + re.escape('{}') + ']') + label_attr = 'colspan="2" align="center" bgcolor="grey"' + edge_attr = 'label = "%s" color="%s" style="bold"' + td_attr = 'align="left"' + block_attr = 'shape="Mrecord" fontname="Courier New"' + + out = ["digraph asm_graph {"] + fix_chars = lambda x: '\\' + x.group() + + # Generate basic blocks + out_blocks = [] + for node in graph.nodes(): + if isinstance(node, DependencyNode): + name = loc_db.pretty_str(node.loc_key) + node_name = "%s %s %s" % (name, + node.element, + node.line_nb) + else: + node_name = str(node) + out_block = '%s [\n' % hash(node) + out_block += "%s " % block_attr + out_block += 'label =<' + + block_label = '' % ( + label_attr, node_name) + block_html_lines = [] + block_html_lines = ('') + out_block += "%s " % block_label + out_block += block_html_lines + "
%s
' % td_attr + + ('
' % td_attr).join(block_html_lines) + + '
> ];" + out_blocks.append(out_block) + + out += out_blocks + # Generate links + for src, dst in graph.edges(): + edge_color = "black" + out.append('%s -> %s ' % (hash(src), + hash(dst)) + + '[' + edge_attr % ("", edge_color) + '];') + + out.append("}") + return '\n'.join(out) + + +print(" [+] Test dictionary equality") +DNA = DependencyNode(LBL2, A, 0) +DNB = DependencyNode(LBL1, B, 1) +DNC = DependencyNode(LBL1, C, 0) +DNB2 = DependencyNode(LBL1, B, 1) +DNC2 = DependencyNode(LBL1, C, 0) +DNB3 = DependencyNode(LBL1, B, 1) +DNC3 = DependencyNode(LBL1, C, 0) + +IRA = IRATest(loc_db) +IRDst = IRA.IRDst +END = ExprId("END", IRDst.size) +# graph 1 + +G1_IRA = IRA.new_ircfg() + +G1_IRB0 = gen_irblock(LBL0, [[ExprAssign(C, CST1), ExprAssign(IRDst, ExprLoc(LBL1, 32))]]) +G1_IRB1 = gen_irblock(LBL1, [[ExprAssign(B, C), ExprAssign(IRDst, ExprLoc(LBL2, 32))]]) +G1_IRB2 = gen_irblock(LBL2, [[ExprAssign(A, B), ExprAssign(IRDst, END)]]) + +for irb in [G1_IRB0, G1_IRB1, G1_IRB2]: + G1_IRA.add_irblock(irb) + +# graph 2 + +G2_IRA = IRA.new_ircfg() + +G2_IRB0 = gen_irblock(LBL0, [[ExprAssign(C, CST1), ExprAssign(IRDst, ExprLoc(LBL1, 32))]]) +G2_IRB1 = gen_irblock(LBL1, [[ExprAssign(B, CST2), ExprAssign(IRDst, ExprLoc(LBL2, 32))]]) +G2_IRB2 = gen_irblock(LBL2, [[ExprAssign(A, B + C), ExprAssign(IRDst, END)]]) + +for irb in [G2_IRB0, G2_IRB1, G2_IRB2]: + G2_IRA.add_irblock(irb) + + +# graph 3 + +G3_IRA = IRA.new_ircfg() + +G3_IRB0 = gen_irblock( + LBL0, + [ + [ExprAssign(C, CST1), ExprAssign( + IRDst, ExprCond( + COND, + ExprLoc(LBL1, 32), + ExprLoc(LBL2, 32) + ) + ) + ] + ] +) + +G3_IRB1 = gen_irblock(LBL1, [[ExprAssign(B, CST2), ExprAssign(IRDst, ExprLoc(LBL3, 32))]]) +G3_IRB2 = gen_irblock(LBL2, [[ExprAssign(B, CST3), ExprAssign(IRDst, ExprLoc(LBL3, 32))]]) +G3_IRB3 = gen_irblock(LBL3, [[ExprAssign(A, B + C), ExprAssign(IRDst, END)]]) + +for irb in [G3_IRB0, G3_IRB1, G3_IRB2, G3_IRB3]: + G3_IRA.add_irblock(irb) + +# graph 4 + +G4_IRA = IRA.new_ircfg() + +G4_IRB0 = gen_irblock(LBL0, [[ExprAssign(C, CST1), ExprAssign(IRDst, ExprLoc(LBL1, 32))]]) +G4_IRB1 = gen_irblock( + LBL1, + [ + [ExprAssign(C, C + CST2)], + [ExprAssign(IRDst, + ExprCond( + C, + ExprLoc(LBL2, 32), + ExprLoc(LBL1, 32)) + ) + ]] +) + +G4_IRB2 = gen_irblock(LBL2, [[ExprAssign(A, B), ExprAssign(IRDst, END)]]) + +for irb in [G4_IRB0, G4_IRB1, G4_IRB2]: + G4_IRA.add_irblock(irb) + + +# graph 5 + +G5_IRA = IRA.new_ircfg() + +G5_IRB0 = gen_irblock(LBL0, [[ExprAssign(B, CST1), ExprAssign(IRDst, ExprLoc(LBL1, 32))]]) +G5_IRB1 = gen_irblock( + LBL1, + [ + [ExprAssign(B, B + CST2)], + [ExprAssign( + IRDst, + ExprCond( + B, + ExprLoc(LBL2, 32), + ExprLoc(LBL1, 32) + ) + ) + ] + ] +) + +G5_IRB2 = gen_irblock(LBL2, [[ExprAssign(A, B), ExprAssign(IRDst, END)]]) + +for irb in [G5_IRB0, G5_IRB1, G5_IRB2]: + G5_IRA.add_irblock(irb) + +# graph 6 + +G6_IRA = IRA.new_ircfg() + +G6_IRB0 = gen_irblock(LBL0, [[ExprAssign(B, CST1), ExprAssign(IRDst, ExprLoc(LBL1, 32))]]) +G6_IRB1 = gen_irblock(LBL1, [[ExprAssign(A, B), ExprAssign(IRDst, ExprLoc(LBL1, 32))]]) + +for irb in [G6_IRB0, G6_IRB1]: + G6_IRA.add_irblock(irb) + +# graph 7 + +G7_IRA = IRA.new_ircfg() + +G7_IRB0 = gen_irblock(LBL0, [[ExprAssign(C, CST1), ExprAssign(IRDst, ExprLoc(LBL1, 32))]]) +G7_IRB1 = gen_irblock( + LBL1, + [ + [ExprAssign(B, C)], + [ExprAssign(A, B)], + [ExprAssign( + IRDst, + ExprCond( + COND, + ExprLoc(LBL1, 32), + ExprLoc(LBL2, 32) + ) + ) + ] + ] +) + +G7_IRB2 = gen_irblock(LBL2, [[ExprAssign(D, A), ExprAssign(IRDst, END)]]) + +for irb in [G7_IRB0, G7_IRB1, G7_IRB2]: + 
G7_IRA.add_irblock(irb) + +# graph 8 + +G8_IRA = IRA.new_ircfg() + +G8_IRB0 = gen_irblock(LBL0, [[ExprAssign(C, CST1), ExprAssign(IRDst, ExprLoc(LBL1, 32))]]) +G8_IRB1 = gen_irblock( + LBL1, + [ + [ExprAssign(B, C)], + [ExprAssign(C, D), + ExprAssign( + IRDst, + ExprCond( + COND, + ExprLoc(LBL1, 32), + ExprLoc(LBL2, 32) + ) + ) + ] + ] +) +G8_IRB2 = gen_irblock(LBL2, [[ExprAssign(A, B), ExprAssign(IRDst, END)]]) + +for irb in [G8_IRB0, G8_IRB1, G8_IRB2]: + G8_IRA.add_irblock(irb) + +# graph 9 is graph 8 + +# graph 10 + +G10_IRA = IRA.new_ircfg() + +G10_IRB1 = gen_irblock( + LBL1, + [ + [ExprAssign(B, B + CST2), + ExprAssign( + IRDst, + ExprCond( + COND, + ExprLoc(LBL1, 32), + ExprLoc(LBL2, 32) + ) + ) + ] + ] +) + +G10_IRB2 = gen_irblock(LBL2, [[ExprAssign(A, B), ExprAssign(IRDst, END)]]) + +for irb in [G10_IRB1, G10_IRB2]: + G10_IRA.add_irblock(irb) + +# graph 11 + +G11_IRA = IRA.new_ircfg() + +G11_IRB0 = gen_irblock( + LBL0, + [ + [ExprAssign(A, CST1), + ExprAssign(B, CST2), + ExprAssign(IRDst, ExprLoc(LBL1, 32)) + ] + ] +) + +G11_IRB1 = gen_irblock( + LBL1, + [ + [ExprAssign(A, B), + ExprAssign(B, A), + ExprAssign(IRDst, ExprLoc(LBL2, 32)) + ] + ] +) + +G11_IRB2 = gen_irblock(LBL2, [[ExprAssign(A, A - B), ExprAssign(IRDst, END)]]) + +for irb in [G11_IRB0, G11_IRB1, G11_IRB2]: + G11_IRA.add_irblock(irb) + +# graph 12 + +G12_IRA = IRA.new_ircfg() + +G12_IRB0 = gen_irblock(LBL0, [[ExprAssign(B, CST1), ExprAssign(IRDst, ExprLoc(LBL1, 32))]]) +G12_IRB1 = gen_irblock( + LBL1, + [ + [ExprAssign(A, B)], + [ExprAssign(B, B + CST2), + ExprAssign( + IRDst, + ExprCond( + COND, + ExprLoc(LBL1, 32), + ExprLoc(LBL2, 32) + ) + ) + ] + ] +) + +G12_IRB2 = gen_irblock(LBL2, [[ExprAssign(B, A), ExprAssign(IRDst, END)]]) + +for irb in [G12_IRB0, G12_IRB1, G12_IRB2]: + G12_IRA.add_irblock(irb) + + +# graph 13 + +G13_IRA = IRA.new_ircfg() + +G13_IRB0 = gen_irblock(LBL0, [[ExprAssign(A, CST1)], + #[ExprAssign(B, A)], + [ExprAssign(IRDst, + ExprLoc(LBL1, 32))]]) +G13_IRB1 = gen_irblock(LBL1, [[ExprAssign(C, A)], + #[ExprAssign(A, A + CST1)], + [ExprAssign(IRDst, + ExprCond( + R, + ExprLoc(LBL2, 32), + ExprLoc(LBL3, 32) + ) + )]]) + +G13_IRB2 = gen_irblock(LBL2, [[ExprAssign(B, A + CST3)], [ExprAssign(A, B + CST3)], + [ExprAssign(IRDst, + ExprLoc(LBL1, 32))]]) + +G13_IRB3 = gen_irblock(LBL3, [[ExprAssign(R, C), ExprAssign(IRDst, END)]]) + +for irb in [G13_IRB0, G13_IRB1, G13_IRB2, G13_IRB3]: + G13_IRA.add_irblock(irb) + +# graph 14 + +G14_IRA = IRA.new_ircfg() + +G14_IRB0 = gen_irblock(LBL0, [[ExprAssign(A, CST1)], + [ExprAssign(IRDst, + ExprLoc(LBL1, 32))] + ]) +G14_IRB1 = gen_irblock(LBL1, [[ExprAssign(B, A)], + [ExprAssign(IRDst, + ExprCond( + C, + ExprLoc(LBL2, 32), + ExprLoc(LBL3, 32) + ) + ) + ] + ]) + +G14_IRB2 = gen_irblock(LBL2, [[ExprAssign(D, A)], + [ExprAssign(A, D + CST1)], + [ExprAssign(IRDst, + ExprLoc(LBL1, 32))] + ]) + +G14_IRB3 = gen_irblock(LBL3, [[ExprAssign(R, D + B), ExprAssign(IRDst, END)]]) + +for irb in [G14_IRB0, G14_IRB1, G14_IRB2, G14_IRB3]: + G14_IRA.add_irblock(irb) + +# graph 16 + +G15_IRA = IRA.new_ircfg() + +G15_IRB0 = gen_irblock(LBL0, [[ExprAssign(A, CST1), ExprAssign(IRDst, ExprLoc(LBL1, 32))]]) +G15_IRB1 = gen_irblock(LBL1, [[ExprAssign(D, A + B)], + [ExprAssign(C, D)], + [ExprAssign(B, C), + ExprAssign(IRDst, + ExprCond( + C, + ExprLoc(LBL1, 32), + ExprLoc(LBL2, 32) + ) + )]]) +G15_IRB2 = gen_irblock(LBL2, [[ExprAssign(R, B), ExprAssign(IRDst, END)]]) + +for irb in [G15_IRB0, G15_IRB1, G15_IRB2]: + G15_IRA.add_irblock(irb) + +# graph 16 + +G16_IRA = IRA.new_ircfg() + 
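+# lbl1 in graph 16 sets R from D, then dispatches through a triple-nested
+# ExprCond on C, giving it four possible successors (lbl2, lbl3, lbl4, lbl5);
+# each of those blocks jumps straight back to lbl1.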
+G16_IRB0 = gen_irblock( + LBL0, [ + [ExprAssign(A, CST1), ExprAssign(IRDst, ExprLoc(LBL1, 32))] + ] +) + +G16_IRB1 = gen_irblock( + LBL1, + [ + [ExprAssign(R, D), + ExprAssign( + IRDst, + ExprCond( + C, + ExprCond( + C, + ExprCond( + C, + ExprLoc(LBL2, 32), + ExprLoc(LBL3, 32) + ), + ExprLoc(LBL4, 32) + ), + ExprLoc(LBL5, 32) + ) + ) + ] + ] +) + + + +G16_IRB2 = gen_irblock(LBL2, [[ExprAssign(D, A), ExprAssign(IRDst, ExprLoc(LBL1, 32))]]) +G16_IRB3 = gen_irblock(LBL3, [[ExprAssign(R, D), ExprAssign(IRDst, ExprLoc(LBL1, 32))]]) +G16_IRB4 = gen_irblock(LBL4, [[ExprAssign(R, A), ExprAssign(IRDst, ExprLoc(LBL1, 32))]]) +G16_IRB5 = gen_irblock(LBL5, [[ExprAssign(R, A), ExprAssign(IRDst, ExprLoc(LBL1, 32))]]) + +for irb in [G16_IRB0, G16_IRB1, G16_IRB2, G16_IRB3, G16_IRB4, G16_IRB5]: + G16_IRA.add_irblock(irb) + +# graph 17 + +G17_IRA = IRA.new_ircfg() + +G17_IRB0 = gen_irblock(LBL0, [[ExprAssign(A, CST1), + ExprAssign(D, CST2), + ExprAssign(IRDst, ExprLoc(LBL1, 32))]]) +G17_IRB1 = gen_irblock(LBL1, [[ExprAssign(A, D), + ExprAssign(B, D), + ExprAssign(IRDst, ExprLoc(LBL2, 32))]]) +G17_IRB2 = gen_irblock(LBL2, [[ExprAssign(A, A - B), + ExprAssign(IRDst, END)]]) + +G17_IRA.add_uniq_edge(G17_IRB0.loc_key, G17_IRB1.loc_key) +G17_IRA.add_uniq_edge(G17_IRB1.loc_key, G17_IRB2.loc_key) + +for irb in [G17_IRB0, G17_IRB1, G17_IRB2]: + G17_IRA.add_irblock(irb) + +# Test graph 1 +G1_TEST1_DN1 = DependencyNode( + G1_IRB2.loc_key, A, len(G1_IRB2)) + +G1_INPUT = (set([G1_TEST1_DN1]), set([G1_IRB0.loc_key])) + +# Test graph 2 + +G2_TEST1_DN1 = DependencyNode( + G2_IRB2.loc_key, A, len(G2_IRB2)) + +G2_INPUT = (set([G2_TEST1_DN1]), set([G2_IRB0.loc_key])) + +# Test graph 3 + +G3_TEST1_0_DN1 = DependencyNode( + G3_IRB3.loc_key, A, len(G3_IRB3)) + +G3_INPUT = (set([G3_TEST1_0_DN1]), set([G3_IRB0.loc_key])) + +# Test graph 4 + +G4_TEST1_DN1 = DependencyNode( + G4_IRB2.loc_key, A, len(G2_IRB0)) + +G4_INPUT = (set([G4_TEST1_DN1]), set([G4_IRB0.loc_key])) + +# Test graph 5 + +G5_TEST1_0_DN1 = DependencyNode( + G5_IRB2.loc_key, A, len(G5_IRB2)) + +G5_INPUT = (set([G5_TEST1_0_DN1]), set([G5_IRB0.loc_key])) + +# Test graph 6 + +G6_TEST1_0_DN1 = DependencyNode( + G6_IRB1.loc_key, A, len(G6_IRB1)) + +G6_INPUT = (set([G6_TEST1_0_DN1]), set([G6_IRB0.loc_key])) + +# Test graph 7 + +G7_TEST1_0_DN1 = DependencyNode( + G7_IRB2.loc_key, D, len(G7_IRB2)) + +G7_INPUT = (set([G7_TEST1_0_DN1]), set([G7_IRB0.loc_key])) + +# Test graph 8 + +G8_TEST1_0_DN1 = DependencyNode( + G8_IRB2.loc_key, A, len(G8_IRB2)) + +G8_INPUT = (set([G8_TEST1_0_DN1]), set([G3_IRB0.loc_key])) + +# Test 9: Multi elements + +G9_TEST1_0_DN1 = DependencyNode( + G8_IRB2.loc_key, A, len(G8_IRB2)) +G9_TEST1_0_DN5 = DependencyNode( + G8_IRB2.loc_key, C, len(G8_IRB2)) + +G9_INPUT = (set([G9_TEST1_0_DN1, G9_TEST1_0_DN5]), set([G8_IRB0.loc_key])) + +# Test 10: loop at beginning + +G10_TEST1_0_DN1 = DependencyNode( + G10_IRB2.loc_key, A, len(G10_IRB2)) + +G10_INPUT = (set([G10_TEST1_0_DN1]), set([G10_IRB1.loc_key])) + + +# Test 11: no dual block emulation + +G11_TEST1_DN1 = DependencyNode( + G11_IRB2.loc_key, A, len(G11_IRB2)) + +G11_INPUT = (set([G11_TEST1_DN1]), set([G11_IRB0.loc_key])) + +# Test graph 12 + +G12_TEST1_0_DN1 = DependencyNode(G12_IRB2.loc_key, B, 1) + +G12_INPUT = (set([G12_TEST1_0_DN1]), set([])) + +# Test graph 13: + +# All filters + +G13_TEST1_0_DN4 = DependencyNode(G13_IRB3.loc_key, R, 1) + +G13_INPUT = (set([G13_TEST1_0_DN4]), set([])) + +# Test graph 14 + +# All filters + +G14_TEST1_0_DN1 = DependencyNode(G14_IRB3.loc_key, R, 1) + +G14_INPUT = 
(set([G14_TEST1_0_DN1]), set([])) + +# Test graph 15 + +G15_TEST1_0_DN1 = DependencyNode(G15_IRB2.loc_key, R, 1) + +G15_INPUT = (set([G15_TEST1_0_DN1]), set([])) + +# Test graph 16 +G16_TEST1_0_DN1 = DependencyNode(G16_IRB5.loc_key, R, 1) + +G16_INPUT = (set([G16_TEST1_0_DN1]), set([])) + +# Test graph 17 + +G17_TEST1_DN1 = DependencyNode(G17_IRB2.loc_key, A, 1) + +G17_INPUT = (set([G17_TEST1_DN1]), set([])) + + +FAILED = set() + + +def flatNode(node): + if isinstance(node, DependencyNode): + if isinstance(node.element, ExprId): + element = node.element.name + elif isinstance(node.element, ExprInt): + element = int(node.element) + else: + RuntimeError("Unsupported type '%s'" % type(enode.element)) + names = loc_db.get_location_names(node.loc_key) + assert len(names) == 1 + name = next(iter(names)) + return ( + name, + element, + node.line_nb + ) + else: + return str(node) + + +def flatGraph(graph): + out_nodes, out_edges = set(), set() + for node in graph.nodes(): + out_nodes.add(flatNode(node)) + for nodeA, nodeB in graph.edges(): + out_edges.add((flatNode(nodeA), flatNode(nodeB))) + out = ( + tuple(sorted(list(out_nodes), key=str)), + tuple(sorted(list(out_edges), key=str)) + ) + return out + + +def unflatGraph(flat_graph): + graph = DiGraph() + nodes, edges = flat_graph + for node in nodes: + graph.add_node(node) + for nodeA, nodeB in edges: + graph.add_edge(nodeA, nodeB) + return graph + + +def get_node_noidx(node): + if isinstance(node, tuple): + return (node[0], node[1], node[2]) + else: + return node + + +def test_result(graphA, graphB, leaves): + """ + Test graph equality without using node index + """ + + todo = set((leaf, leaf) for leaf in leaves) + done = set() + while todo: + nodeA, nodeB = todo.pop() + if (nodeA, nodeB) in done: + continue + done.add((nodeA, nodeB)) + + if get_node_noidx(nodeA) != get_node_noidx(nodeB): + return False + if nodeA not in graphA.nodes(): + return False + if nodeB not in graphB.nodes(): + return False + + parentsA = graphA.predecessors(nodeA) + parentsB = graphB.predecessors(nodeB) + if len(parentsA) != len(parentsB): + return False + + parentsA_noidx, parentsB_noidx = {}, {} + for parents, parents_noidx in ((parentsA, parentsA_noidx), + (parentsB, parentsB_noidx)): + for node in parents: + node_noidx = get_node_noidx(node) + assert(node_noidx not in parents_noidx) + parents_noidx[node_noidx] = node + + if set(parentsA_noidx.keys()) != set(parentsB_noidx.keys()): + return False + + for node_noidx, nodeA in viewitems(parentsA_noidx): + nodeB = parentsB_noidx[node_noidx] + todo.add((nodeA, nodeB)) + + return True + + +def match_results(resultsA, resultsB, nodes): + """ + Match computed list of graph against test cases + """ + out = [] + + if len(resultsA) != len(resultsB): + return False + + for flatA in resultsA: + resultA = unflatGraph(flatA) + nodes = resultA.leaves() + for resultB in resultsB: + if test_result(resultA, resultB, nodes): + out.append((resultA, resultB)) + return len(out) == len(resultsB) + + +def get_flat_init_depnodes(depnodes): + out = [] + for node in depnodes: + name = loc_db.pretty_str(node.loc_key) + out.append((name, + node.element.name, + node.line_nb, + 0)) + return out + +# TESTS +flat_test_results = [[((('lbl0', 1, 0), ('lbl0', 'c', 0), ('lbl1', 'b', 0), ('lbl2', 'a', 0)), + ((('lbl0', 1, 0), ('lbl0', 'c', 0)), + (('lbl0', 'c', 0), ('lbl1', 'b', 0)), + (('lbl1', 'b', 0), ('lbl2', 'a', 0))))], + [((('lbl0', 1, 0), + ('lbl0', 'c', 0), + ('lbl1', 2, 0), + ('lbl1', 'b', 0), + ('lbl2', 'a', 0)), + ((('lbl0', 1, 0), ('lbl0', 
'c', 0)), + (('lbl0', 'c', 0), ('lbl2', 'a', 0)), + (('lbl1', 2, 0), ('lbl1', 'b', 0)), + (('lbl1', 'b', 0), ('lbl2', 'a', 0))))], + [((('lbl0', 1, 0), + ('lbl0', 'c', 0), + ('lbl1', 2, 0), + ('lbl1', 'b', 0), + ('lbl3', 'a', 0)), + ((('lbl0', 1, 0), ('lbl0', 'c', 0)), + (('lbl0', 'c', 0), ('lbl3', 'a', 0)), + (('lbl1', 2, 0), ('lbl1', 'b', 0)), + (('lbl1', 'b', 0), ('lbl3', 'a', 0)))), + ((('lbl0', 1, 0), + ('lbl0', 'c', 0), + ('lbl2', 3, 0), + ('lbl2', 'b', 0), + ('lbl3', 'a', 0)), + ((('lbl0', 1, 0), ('lbl0', 'c', 0)), + (('lbl0', 'c', 0), ('lbl3', 'a', 0)), + (('lbl2', 3, 0), ('lbl2', 'b', 0)), + (('lbl2', 'b', 0), ('lbl3', 'a', 0))))], + [(('b', ('lbl2', 'a', 0)), (('b', ('lbl2', 'a', 0)),))], + [((('lbl0', 1, 0), + ('lbl0', 'b', 0), + ('lbl1', 2, 0), + ('lbl1', 'b', 0), + ('lbl2', 'a', 0)), + ((('lbl0', 1, 0), ('lbl0', 'b', 0)), + (('lbl0', 'b', 0), ('lbl1', 'b', 0)), + (('lbl1', 2, 0), ('lbl1', 'b', 0)), + (('lbl1', 'b', 0), ('lbl1', 'b', 0)), + (('lbl1', 'b', 0), ('lbl2', 'a', 0)))), + ((('lbl0', 1, 0), + ('lbl0', 'b', 0), + ('lbl1', 2, 0), + ('lbl1', 'b', 0), + ('lbl2', 'a', 0)), + ((('lbl0', 1, 0), ('lbl0', 'b', 0)), + (('lbl0', 'b', 0), ('lbl1', 'b', 0)), + (('lbl1', 2, 0), ('lbl1', 'b', 0)), + (('lbl1', 'b', 0), ('lbl2', 'a', 0))))], + [((('lbl0', 1, 0), ('lbl0', 'b', 0), ('lbl1', 'a', 0)), + ((('lbl0', 1, 0), ('lbl0', 'b', 0)), + (('lbl0', 'b', 0), ('lbl1', 'a', 0))))], + [((('lbl0', 1, 0), + ('lbl0', 'c', 0), + ('lbl1', 'a', 1), + ('lbl1', 'b', 0), + ('lbl2', 'd', 0)), + ((('lbl0', 1, 0), ('lbl0', 'c', 0)), + (('lbl0', 'c', 0), ('lbl1', 'b', 0)), + (('lbl1', 'a', 1), ('lbl2', 'd', 0)), + (('lbl1', 'b', 0), ('lbl1', 'a', 1))))], + [(('d', ('lbl1', 'b', 0), ('lbl1', 'c', 1), ('lbl2', 'a', 0)), + (('d', ('lbl1', 'c', 1)), + (('lbl1', 'b', 0), ('lbl2', 'a', 0)), + (('lbl1', 'c', 1), ('lbl1', 'b', 0)))), + ((('lbl0', 1, 0), ('lbl0', 'c', 0), ('lbl1', 'b', 0), ('lbl2', 'a', 0)), + ((('lbl0', 1, 0), ('lbl0', 'c', 0)), + (('lbl0', 'c', 0), ('lbl1', 'b', 0)), + (('lbl1', 'b', 0), ('lbl2', 'a', 0))))], + [(('d', + ('lbl0', 1, 0), + ('lbl0', 'c', 0), + ('lbl1', 'b', 0), + ('lbl1', 'c', 1), + ('lbl2', 'a', 0)), + (('d', ('lbl1', 'c', 1)), + (('lbl0', 1, 0), ('lbl0', 'c', 0)), + (('lbl0', 'c', 0), ('lbl1', 'b', 0)), + (('lbl1', 'b', 0), ('lbl2', 'a', 0)))), + (('d', ('lbl1', 'b', 0), ('lbl1', 'c', 1), ('lbl2', 'a', 0)), + (('d', ('lbl1', 'c', 1)), + (('lbl1', 'b', 0), ('lbl2', 'a', 0)), + (('lbl1', 'c', 1), ('lbl1', 'b', 0))))], + [(('b', ('lbl1', 2, 0), ('lbl1', 'b', 0), ('lbl2', 'a', 0)), + (('b', ('lbl1', 'b', 0)), + (('lbl1', 2, 0), ('lbl1', 'b', 0)), + (('lbl1', 'b', 0), ('lbl1', 'b', 0)), + (('lbl1', 'b', 0), ('lbl2', 'a', 0)))), + (('b', ('lbl1', 2, 0), ('lbl1', 'b', 0), ('lbl2', 'a', 0)), + (('b', ('lbl1', 'b', 0)), + (('lbl1', 2, 0), ('lbl1', 'b', 0)), + (('lbl1', 'b', 0), ('lbl2', 'a', 0))))], + [((('lbl0', 1, 0), + ('lbl0', 2, 0), + ('lbl0', 'a', 0), + ('lbl0', 'b', 0), + ('lbl1', 'a', 0), + ('lbl1', 'b', 0), + ('lbl2', 'a', 0)), + ((('lbl0', 1, 0), ('lbl0', 'a', 0)), + (('lbl0', 2, 0), ('lbl0', 'b', 0)), + (('lbl0', 'a', 0), ('lbl1', 'b', 0)), + (('lbl0', 'b', 0), ('lbl1', 'a', 0)), + (('lbl1', 'a', 0), ('lbl2', 'a', 0)), + (('lbl1', 'b', 0), ('lbl2', 'a', 0))))], + [((('lbl0', 1, 0), + ('lbl0', 'b', 0), + ('lbl1', 2, 1), + ('lbl1', 'a', 0), + ('lbl1', 'b', 1), + ('lbl2', 'b', 0)), + ((('lbl0', 1, 0), ('lbl0', 'b', 0)), + (('lbl0', 'b', 0), ('lbl1', 'b', 1)), + (('lbl1', 2, 1), ('lbl1', 'b', 1)), + (('lbl1', 'a', 0), ('lbl2', 'b', 0)), + (('lbl1', 'b', 1), ('lbl1', 'a', 0)))), 
+ ((('lbl0', 1, 0), + ('lbl0', 'b', 0), + ('lbl1', 2, 1), + ('lbl1', 'a', 0), + ('lbl1', 'b', 1), + ('lbl2', 'b', 0)), + ((('lbl0', 1, 0), ('lbl0', 'b', 0)), + (('lbl0', 'b', 0), ('lbl1', 'b', 1)), + (('lbl1', 2, 1), ('lbl1', 'b', 1)), + (('lbl1', 'a', 0), ('lbl2', 'b', 0)), + (('lbl1', 'b', 1), ('lbl1', 'a', 0)), + (('lbl1', 'b', 1), ('lbl1', 'b', 1)))), + ((('lbl0', 1, 0), ('lbl0', 'b', 0), ('lbl1', 'a', 0), ('lbl2', 'b', 0)), + ((('lbl0', 1, 0), ('lbl0', 'b', 0)), + (('lbl0', 'b', 0), ('lbl1', 'a', 0)), + (('lbl1', 'a', 0), ('lbl2', 'b', 0))))], + [((('lbl0', 1, 0), + ('lbl0', 'a', 0), + ('lbl1', 'c', 0), + ('lbl2', 3, 0), + ('lbl2', 3, 1), + ('lbl2', 'a', 1), + ('lbl2', 'b', 0), + ('lbl3', 'r', 0)), + ((('lbl0', 1, 0), ('lbl0', 'a', 0)), + (('lbl0', 'a', 0), ('lbl2', 'b', 0)), + (('lbl1', 'c', 0), ('lbl3', 'r', 0)), + (('lbl2', 3, 0), ('lbl2', 'b', 0)), + (('lbl2', 3, 1), ('lbl2', 'a', 1)), + (('lbl2', 'a', 1), ('lbl1', 'c', 0)), + (('lbl2', 'a', 1), ('lbl2', 'b', 0)), + (('lbl2', 'b', 0), ('lbl2', 'a', 1)))), + ((('lbl0', 1, 0), + ('lbl0', 'a', 0), + ('lbl1', 'c', 0), + ('lbl2', 3, 0), + ('lbl2', 3, 1), + ('lbl2', 'a', 1), + ('lbl2', 'b', 0), + ('lbl3', 'r', 0)), + ((('lbl0', 1, 0), ('lbl0', 'a', 0)), + (('lbl0', 'a', 0), ('lbl2', 'b', 0)), + (('lbl1', 'c', 0), ('lbl3', 'r', 0)), + (('lbl2', 3, 0), ('lbl2', 'b', 0)), + (('lbl2', 3, 1), ('lbl2', 'a', 1)), + (('lbl2', 'a', 1), ('lbl1', 'c', 0)), + (('lbl2', 'b', 0), ('lbl2', 'a', 1)))), + ((('lbl0', 1, 0), ('lbl0', 'a', 0), ('lbl1', 'c', 0), ('lbl3', 'r', 0)), + ((('lbl0', 1, 0), ('lbl0', 'a', 0)), + (('lbl0', 'a', 0), ('lbl1', 'c', 0)), + (('lbl1', 'c', 0), ('lbl3', 'r', 0))))], + [(('d', + ('lbl0', 1, 0), + ('lbl0', 'a', 0), + ('lbl1', 'b', 0), + ('lbl3', 'r', 0)), + (('d', ('lbl3', 'r', 0)), + (('lbl0', 1, 0), ('lbl0', 'a', 0)), + (('lbl0', 'a', 0), ('lbl1', 'b', 0)), + (('lbl1', 'b', 0), ('lbl3', 'r', 0)))), + ((('lbl0', 1, 0), + ('lbl0', 'a', 0), + ('lbl1', 'b', 0), + ('lbl2', 1, 1), + ('lbl2', 'a', 1), + ('lbl2', 'd', 0), + ('lbl3', 'r', 0)), + ((('lbl0', 1, 0), ('lbl0', 'a', 0)), + (('lbl0', 'a', 0), ('lbl2', 'd', 0)), + (('lbl1', 'b', 0), ('lbl3', 'r', 0)), + (('lbl2', 1, 1), ('lbl2', 'a', 1)), + (('lbl2', 'a', 1), ('lbl1', 'b', 0)), + (('lbl2', 'a', 1), ('lbl2', 'd', 0)), + (('lbl2', 'd', 0), ('lbl2', 'a', 1)), + (('lbl2', 'd', 0), ('lbl3', 'r', 0)))), + ((('lbl0', 1, 0), + ('lbl0', 'a', 0), + ('lbl1', 'b', 0), + ('lbl2', 1, 1), + ('lbl2', 'a', 1), + ('lbl2', 'd', 0), + ('lbl3', 'r', 0)), + ((('lbl0', 1, 0), ('lbl0', 'a', 0)), + (('lbl0', 'a', 0), ('lbl2', 'd', 0)), + (('lbl1', 'b', 0), ('lbl3', 'r', 0)), + (('lbl2', 1, 1), ('lbl2', 'a', 1)), + (('lbl2', 'a', 1), ('lbl1', 'b', 0)), + (('lbl2', 'd', 0), ('lbl2', 'a', 1)), + (('lbl2', 'd', 0), ('lbl3', 'r', 0))))], + [(('b', + ('lbl0', 1, 0), + ('lbl0', 'a', 0), + ('lbl1', 'b', 2), + ('lbl1', 'c', 1), + ('lbl1', 'd', 0), + ('lbl2', 'r', 0)), + (('b', ('lbl1', 'd', 0)), + (('lbl0', 1, 0), ('lbl0', 'a', 0)), + (('lbl0', 'a', 0), ('lbl1', 'd', 0)), + (('lbl1', 'b', 2), ('lbl1', 'd', 0)), + (('lbl1', 'b', 2), ('lbl2', 'r', 0)), + (('lbl1', 'c', 1), ('lbl1', 'b', 2)), + (('lbl1', 'd', 0), ('lbl1', 'c', 1)))), + (('b', + ('lbl0', 1, 0), + ('lbl0', 'a', 0), + ('lbl1', 'b', 2), + ('lbl1', 'c', 1), + ('lbl1', 'd', 0), + ('lbl2', 'r', 0)), + (('b', ('lbl1', 'd', 0)), + (('lbl0', 1, 0), ('lbl0', 'a', 0)), + (('lbl0', 'a', 0), ('lbl1', 'd', 0)), + (('lbl1', 'b', 2), ('lbl2', 'r', 0)), + (('lbl1', 'c', 1), ('lbl1', 'b', 2)), + (('lbl1', 'd', 0), ('lbl1', 'c', 1))))], + [((('lbl0', 1, 0), 
('lbl0', 'a', 0), ('lbl5', 'r', 0)), + ((('lbl0', 1, 0), ('lbl0', 'a', 0)), + (('lbl0', 'a', 0), ('lbl5', 'r', 0))))], + [((('lbl0', 2, 0), + ('lbl0', 'd', 0), + ('lbl1', 'a', 0), + ('lbl1', 'b', 0), + ('lbl2', 'a', 0)), + ((('lbl0', 2, 0), ('lbl0', 'd', 0)), + (('lbl0', 'd', 0), ('lbl1', 'a', 0)), + (('lbl0', 'd', 0), ('lbl1', 'b', 0)), + (('lbl1', 'a', 0), ('lbl2', 'a', 0)), + (('lbl1', 'b', 0), ('lbl2', 'a', 0))))]] + +test_results = [[unflatGraph(flat_result) for flat_result in flat_results] + for flat_results in flat_test_results] + +all_flats = [] +# Launch tests +for test_nb, test in enumerate([(G1_IRA, G1_INPUT), + (G2_IRA, G2_INPUT), + (G3_IRA, G3_INPUT), + (G4_IRA, G4_INPUT), + (G5_IRA, G5_INPUT), + (G6_IRA, G6_INPUT), + (G7_IRA, G7_INPUT), + (G8_IRA, G8_INPUT), + (G8_IRA, G9_INPUT), + (G10_IRA, G10_INPUT), + (G11_IRA, G11_INPUT), + (G12_IRA, G12_INPUT), + (G13_IRA, G13_INPUT), + (G14_IRA, G14_INPUT), + (G15_IRA, G15_INPUT), + (G16_IRA, G16_INPUT), + (G17_IRA, G17_INPUT), + ]): + + # Extract test elements + print("[+] Test", test_nb + 1) + ircfg, (depnodes, heads) = test + + open("graph_%02d.dot" % (test_nb + 1), "w").write(ircfg.dot()) + open("graph_%02d.dot" % (test_nb + 1), "w").write(bloc2graph(ircfg)) + + # Different options + suffix_key_list = ["", "_nosimp", "_nomem", "_nocall", + "_implicit"] + # Test classes + for g_ind, g_dep in enumerate([DependencyGraph(ircfg), + DependencyGraph(ircfg, apply_simp=False), + DependencyGraph(ircfg, follow_mem=False), + DependencyGraph( + ircfg, follow_mem=False, + follow_call=False + ), + # DependencyGraph(ircfg, implicit=True), + ]): + # if g_ind == 4: + # TODO: Implicit specifications + # continue + print(" - Class %s - %s" % (g_dep.__class__.__name__, + suffix_key_list[g_ind])) + # Select the correct result key + mode_suffix = suffix_key_list[g_ind] + graph_test_key = "graph" + mode_suffix + + # Test public APIs + results = g_dep.get_from_depnodes(depnodes, heads) + print("RESULTS") + all_results = set() + all_flat = set() + for i, result in enumerate(results): + all_flat.add(flatGraph(result.graph)) + all_results.add(flatGraph(result.graph)) + open("graph_test_%02d_%02d.dot" % (test_nb + 1, i), + "w").write(dg2graph(result.graph)) + + if g_ind == 0: + all_flat = sorted(all_flat, key=str) + all_flats.append(all_flat) + flat_depnodes = get_flat_init_depnodes(depnodes) + if not match_results(all_results, test_results[test_nb], flat_depnodes): + FAILED.add(test_nb) + continue + +if FAILED: + print("FAILED :", len(FAILED)) + for test_num in sorted(FAILED): + print(test_num, end=' ') +else: + print("SUCCESS") + +# Return an error status on error +assert not FAILED + +#!/usr/bin/python +# -*- coding: utf-8 -*- + +""" +Ansible module to configure .deb packages. +(c) 2014, Brian Coca + +This file is part of Ansible + +Ansible is free software: you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +Ansible is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with Ansible. If not, see . 
+""" + +DOCUMENTATION = ''' +--- +module: debconf +short_description: Configure a .deb package +description: + - Configure a .deb package using debconf-set-selections. Or just query + existing selections. +version_added: "1.6" +notes: + - This module requires the command line debconf tools. + - A number of questions have to be answered (depending on the package). + Use 'debconf-show ' on any Debian or derivative with the package + installed to see questions/settings available. + - Some distros will always record tasks involving the setting of passwords as changed. This is due to debconf-get-selections masking passwords. +requirements: [ debconf, debconf-utils ] +options: + name: + description: + - Name of package to configure. + required: true + default: null + aliases: ['pkg'] + question: + description: + - A debconf configuration setting + required: false + default: null + aliases: ['setting', 'selection'] + vtype: + description: + - The type of the value supplied + required: false + default: null + choices: [string, password, boolean, select, multiselect, note, error, title, text] + aliases: [] + value: + description: + - Value to set the configuration to + required: false + default: null + aliases: ['answer'] + unseen: + description: + - Do not set 'seen' flag when pre-seeding + required: false + default: False + aliases: [] +author: "Brian Coca (@bcoca)" + +''' + +EXAMPLES = ''' +# Set default locale to fr_FR.UTF-8 +debconf: name=locales question='locales/default_environment_locale' value=fr_FR.UTF-8 vtype='select' + +# set to generate locales: +debconf: name=locales question='locales/locales_to_be_generated' value='en_US.UTF-8 UTF-8, fr_FR.UTF-8 UTF-8' vtype='multiselect' + +# Accept oracle license +debconf: name='oracle-java7-installer' question='shared/accepted-oracle-license-v1-1' value='true' vtype='select' + +# Specifying package you can register/return the list of questions and current values +debconf: name='tzdata' +''' + +def get_selections(module, pkg): + cmd = [module.get_bin_path('debconf-show', True), pkg] + rc, out, err = module.run_command(' '.join(cmd)) + + if rc != 0: + module.fail_json(msg=err) + + selections = {} + + for line in out.splitlines(): + (key, value) = line.split(':', 1) + selections[ key.strip('*').strip() ] = value.strip() + + return selections + + +def set_selection(module, pkg, question, vtype, value, unseen): + + setsel = module.get_bin_path('debconf-set-selections', True) + cmd = [setsel] + if unseen: + cmd.append('-u') + + data = ' '.join([pkg, question, vtype, value]) + + return module.run_command(cmd, data=data) + +def main(): + + module = AnsibleModule( + argument_spec = dict( + name = dict(required=True, aliases=['pkg'], type='str'), + question = dict(required=False, aliases=['setting', 'selection'], type='str'), + vtype = dict(required=False, type='str', choices=['string', 'password', 'boolean', 'select', 'multiselect', 'note', 'error', 'title', 'text']), + value= dict(required=False, type='str'), + unseen = dict(required=False, type='bool'), + ), + required_together = ( ['question','vtype', 'value'],), + supports_check_mode=True, + ) + + #TODO: enable passing array of options and/or debconf file from get-selections dump + pkg = module.params["name"] + question = module.params["question"] + vtype = module.params["vtype"] + value = module.params["value"] + unseen = module.params["unseen"] + + prev = get_selections(module, pkg) + + changed = False + msg = "" + + if question is not None: + if vtype is None or value is None: + 
module.fail_json(msg="when supplying a question you must supply a valid vtype and value") + + if not question in prev or prev[question] != value: + changed = True + + if changed: + if not module.check_mode: + rc, msg, e = set_selection(module, pkg, question, vtype, value, unseen) + if rc: + module.fail_json(msg=e) + + curr = { question: value } + if question in prev: + prev = {question: prev[question]} + else: + prev[question] = '' + + module.exit_json(changed=changed, msg=msg, current=curr, previous=prev) + + module.exit_json(changed=changed, msg=msg, current=prev) + +# import module snippets +from ansible.module_utils.basic import * + +main() + +#! /usr/bin/python3.3 + +""" +"PYSTONE" Benchmark Program + +Version: Python/1.1 (corresponds to C/1.1 plus 2 Pystone fixes) + +Author: Reinhold P. Weicker, CACM Vol 27, No 10, 10/84 pg. 1013. + + Translated from ADA to C by Rick Richardson. + Every method to preserve ADA-likeness has been used, + at the expense of C-ness. + + Translated from C to Python by Guido van Rossum. + +Version History: + + Version 1.1 corrects two bugs in version 1.0: + + First, it leaked memory: in Proc1(), NextRecord ends + up having a pointer to itself. I have corrected this + by zapping NextRecord.PtrComp at the end of Proc1(). + + Second, Proc3() used the operator != to compare a + record to None. This is rather inefficient and not + true to the intention of the original benchmark (where + a pointer comparison to None is intended; the != + operator attempts to find a method __cmp__ to do value + comparison of the record). Version 1.1 runs 5-10 + percent faster than version 1.0, so benchmark figures + of different versions can't be compared directly. + +""" + +LOOPS = 50000 + +from time import clock + +__version__ = "1.1" + +[Ident1, Ident2, Ident3, Ident4, Ident5] = range(1, 6) + +class Record: + + def __init__(self, PtrComp = None, Discr = 0, EnumComp = 0, + IntComp = 0, StringComp = 0): + self.PtrComp = PtrComp + self.Discr = Discr + self.EnumComp = EnumComp + self.IntComp = IntComp + self.StringComp = StringComp + + def copy(self): + return Record(self.PtrComp, self.Discr, self.EnumComp, + self.IntComp, self.StringComp) + +TRUE = 1 +FALSE = 0 + +def main(loops=LOOPS): + benchtime, stones = pystones(loops) + print("Pystone(%s) time for %d passes = %g" % \ + (__version__, loops, benchtime)) + print("This machine benchmarks at %g pystones/second" % stones) + + +def pystones(loops=LOOPS): + return Proc0(loops) + +IntGlob = 0 +BoolGlob = FALSE +Char1Glob = '\0' +Char2Glob = '\0' +Array1Glob = [0]*51 +Array2Glob = [x[:] for x in [Array1Glob]*51] +PtrGlb = None +PtrGlbNext = None + +def Proc0(loops=LOOPS): + global IntGlob + global BoolGlob + global Char1Glob + global Char2Glob + global Array1Glob + global Array2Glob + global PtrGlb + global PtrGlbNext + + starttime = clock() + for i in range(loops): + pass + nulltime = clock() - starttime + + PtrGlbNext = Record() + PtrGlb = Record() + PtrGlb.PtrComp = PtrGlbNext + PtrGlb.Discr = Ident1 + PtrGlb.EnumComp = Ident3 + PtrGlb.IntComp = 40 + PtrGlb.StringComp = "DHRYSTONE PROGRAM, SOME STRING" + String1Loc = "DHRYSTONE PROGRAM, 1'ST STRING" + Array2Glob[8][7] = 10 + + starttime = clock() + + for i in range(loops): + Proc5() + Proc4() + IntLoc1 = 2 + IntLoc2 = 3 + String2Loc = "DHRYSTONE PROGRAM, 2'ND STRING" + EnumLoc = Ident2 + BoolGlob = not Func2(String1Loc, String2Loc) + while IntLoc1 < IntLoc2: + IntLoc3 = 5 * IntLoc1 - IntLoc2 + IntLoc3 = Proc7(IntLoc1, IntLoc2) + IntLoc1 = IntLoc1 + 1 + Proc8(Array1Glob, Array2Glob, 
IntLoc1, IntLoc3) + PtrGlb = Proc1(PtrGlb) + CharIndex = 'A' + while CharIndex <= Char2Glob: + if EnumLoc == Func1(CharIndex, 'C'): + EnumLoc = Proc6(Ident1) + CharIndex = chr(ord(CharIndex)+1) + IntLoc3 = IntLoc2 * IntLoc1 + IntLoc2 = IntLoc3 / IntLoc1 + IntLoc2 = 7 * (IntLoc3 - IntLoc2) - IntLoc1 + IntLoc1 = Proc2(IntLoc1) + + benchtime = clock() - starttime - nulltime + if benchtime == 0.0: + loopsPerBenchtime = 0.0 + else: + loopsPerBenchtime = (loops / benchtime) + return benchtime, loopsPerBenchtime + +def Proc1(PtrParIn): + PtrParIn.PtrComp = NextRecord = PtrGlb.copy() + PtrParIn.IntComp = 5 + NextRecord.IntComp = PtrParIn.IntComp + NextRecord.PtrComp = PtrParIn.PtrComp + NextRecord.PtrComp = Proc3(NextRecord.PtrComp) + if NextRecord.Discr == Ident1: + NextRecord.IntComp = 6 + NextRecord.EnumComp = Proc6(PtrParIn.EnumComp) + NextRecord.PtrComp = PtrGlb.PtrComp + NextRecord.IntComp = Proc7(NextRecord.IntComp, 10) + else: + PtrParIn = NextRecord.copy() + NextRecord.PtrComp = None + return PtrParIn + +def Proc2(IntParIO): + IntLoc = IntParIO + 10 + while 1: + if Char1Glob == 'A': + IntLoc = IntLoc - 1 + IntParIO = IntLoc - IntGlob + EnumLoc = Ident1 + if EnumLoc == Ident1: + break + return IntParIO + +def Proc3(PtrParOut): + global IntGlob + + if PtrGlb is not None: + PtrParOut = PtrGlb.PtrComp + else: + IntGlob = 100 + PtrGlb.IntComp = Proc7(10, IntGlob) + return PtrParOut + +def Proc4(): + global Char2Glob + + BoolLoc = Char1Glob == 'A' + BoolLoc = BoolLoc or BoolGlob + Char2Glob = 'B' + +def Proc5(): + global Char1Glob + global BoolGlob + + Char1Glob = 'A' + BoolGlob = FALSE + +def Proc6(EnumParIn): + EnumParOut = EnumParIn + if not Func3(EnumParIn): + EnumParOut = Ident4 + if EnumParIn == Ident1: + EnumParOut = Ident1 + elif EnumParIn == Ident2: + if IntGlob > 100: + EnumParOut = Ident1 + else: + EnumParOut = Ident4 + elif EnumParIn == Ident3: + EnumParOut = Ident2 + elif EnumParIn == Ident4: + pass + elif EnumParIn == Ident5: + EnumParOut = Ident3 + return EnumParOut + +def Proc7(IntParI1, IntParI2): + IntLoc = IntParI1 + 2 + IntParOut = IntParI2 + IntLoc + return IntParOut + +def Proc8(Array1Par, Array2Par, IntParI1, IntParI2): + global IntGlob + + IntLoc = IntParI1 + 5 + Array1Par[IntLoc] = IntParI2 + Array1Par[IntLoc+1] = Array1Par[IntLoc] + Array1Par[IntLoc+30] = IntLoc + for IntIndex in range(IntLoc, IntLoc+2): + Array2Par[IntLoc][IntIndex] = IntLoc + Array2Par[IntLoc][IntLoc-1] = Array2Par[IntLoc][IntLoc-1] + 1 + Array2Par[IntLoc+20][IntLoc] = Array1Par[IntLoc] + IntGlob = 5 + +def Func1(CharPar1, CharPar2): + CharLoc1 = CharPar1 + CharLoc2 = CharLoc1 + if CharLoc2 != CharPar2: + return Ident1 + else: + return Ident2 + +def Func2(StrParI1, StrParI2): + IntLoc = 1 + while IntLoc <= 1: + if Func1(StrParI1[IntLoc], StrParI2[IntLoc+1]) == Ident1: + CharLoc = 'A' + IntLoc = IntLoc + 1 + if CharLoc >= 'W' and CharLoc <= 'Z': + IntLoc = 7 + if CharLoc == 'X': + return TRUE + else: + if StrParI1 > StrParI2: + IntLoc = IntLoc + 7 + return TRUE + else: + return FALSE + +def Func3(EnumParIn): + EnumLoc = EnumParIn + if EnumLoc == Ident3: return TRUE + return FALSE + +if __name__ == '__main__': + import sys + def error(msg): + print(msg, end=' ', file=sys.stderr) + print("usage: %s [number_of_loops]" % sys.argv[0], file=sys.stderr) + sys.exit(100) + nargs = len(sys.argv) - 1 + if nargs > 1: + error("%d arguments are too many;" % nargs) + elif nargs == 1: + try: loops = int(sys.argv[1]) + except ValueError: + error("Invalid argument %r;" % sys.argv[1]) + else: + loops = LOOPS + 
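+ # Note: time.clock(), used by Proc0() for timing, was removed in Python 3.8;
+ # time.perf_counter() is the usual replacement on newer interpreters.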
main(loops) + +#!/usr/bin/env python +# coding: utf-8 +# Copyright (c) Pymatgen Development Team. +# Distributed under the terms of the MIT License. + +""" +Implementation for `pmg plot` CLI. +""" + +from collections import OrderedDict +from pymatgen import Structure +from pymatgen.electronic_structure.plotter import DosPlotter +from pymatgen.io.vasp import Vasprun, Chgcar +from pymatgen.symmetry.analyzer import SpacegroupAnalyzer +from pymatgen.analysis.diffraction.xrd import XRDCalculator + + +def get_dos_plot(args): + """ + Plot DOS. + + Args: + args (dict): Args from argparse. + """ + v = Vasprun(args.dos_file) + dos = v.complete_dos + + all_dos = OrderedDict() + all_dos["Total"] = dos + + structure = v.final_structure + + if args.site: + for i in range(len(structure)): + site = structure[i] + all_dos["Site " + str(i) + " " + site.specie.symbol] = \ + dos.get_site_dos(site) + + if args.element: + syms = [tok.strip() for tok in args.element[0].split(",")] + all_dos = {} + for el, dos in dos.get_element_dos().items(): + if el.symbol in syms: + all_dos[el] = dos + if args.orbital: + all_dos = dos.get_spd_dos() + + plotter = DosPlotter() + plotter.add_dos_dict(all_dos) + return plotter.get_plot() + + +def get_chgint_plot(args): + """ + Plot integrated charge. + + Args: + args (dict): args from argparse. + """ + chgcar = Chgcar.from_file(args.chgcar_file) + s = chgcar.structure + + if args.inds: + atom_ind = [int(i) for i in args.inds[0].split(",")] + else: + finder = SpacegroupAnalyzer(s, symprec=0.1) + sites = [sites[0] for sites in + finder.get_symmetrized_structure().equivalent_sites] + atom_ind = [s.sites.index(site) for site in sites] + + from pymatgen.util.plotting import pretty_plot + plt = pretty_plot(12, 8) + for i in atom_ind: + d = chgcar.get_integrated_diff(i, args.radius, 30) + plt.plot(d[:, 0], d[:, 1], + label="Atom {} - {}".format(i, s[i].species_string)) + plt.legend(loc="upper left") + plt.xlabel("Radius (A)") + plt.ylabel("Integrated charge (e)") + plt.tight_layout() + return plt + + +def get_xrd_plot(args): + """ + Plot XRD + + Args: + args (dict): Args from argparse + """ + s = Structure.from_file(args.xrd_structure_file) + c = XRDCalculator() + return c.get_plot(s) + + +def plot(args): + """ + Master control method calling other plot methods based on args. + + Args: + args (dict): Args from argparse. + """ + plt = None + if args.chgcar_file: + plt = get_chgint_plot(args) + elif args.xrd_structure_file: + plt = get_xrd_plot(args) + elif args.dos_file: + plt = get_dos_plot(args) + + if plt: + if args.out_file: + plt.savefig(args.out_file) + else: + plt.show() + +# Copyright (c) 1999-2008 Mark D. Hill and David A. Wood +# Copyright (c) 2009 The Hewlett-Packard Development Company +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer; +# redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution; +# neither the name of the copyright holders nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +from slicc.util import PairContainer + +class Symbol(PairContainer): + def __init__(self, symtab, ident, location, pairs=None): + super(Symbol, self).__init__() + + from slicc.util import Location + from slicc.symbols import SymbolTable + if not isinstance(symtab, SymbolTable): raise AttributeError + if not isinstance(ident, str): raise AttributeError + if not isinstance(location, Location): raise AttributeError + + self.symtab = symtab + self.ident = ident + self.location = location + if pairs: + self.pairs.update(getattr(pairs, "pairs", pairs)) + if "short" not in self: + self["short"] = self.ident + self.used = False + + def __repr__(self): + return "[Symbol: %s]" % self.ident + + def __str__(self): + return str(self.ident) + + def __setitem__(self, key, value): + if key in self.pairs: + self.warning("Pair key '%s' re-defined. new: '%s' old: '%s'", + key, value, self.pairs[key]) + super(Symbol, self).__setitem__(key, value) + + @property + def short(self): + return self["short"] + + @property + def desc(self): + return self["desc"] + + def error(self, message, *args): + self.location.error(message, *args) + + def warning(self, message, *args): + self.location.warning(message, *args) + + def writeHTMLFiles(self, path): + pass + +__all__ = [ "Symbol" ] + +#!/usr/bin/env python +__author__ = 'Mike McCann' +__copyright__ = '2013' +__license__ = 'GPL v3' +__contact__ = 'mccann at mbari.org' + +__doc__ = ''' + +Master loader for all August 2013 SIMZ activities. 
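+ Loads the Dorado AUV surveys, the Spray glider L_662 section, the Rachel Carson
+ underway and profile CTD casts, the M1 mooring record, and the associated net
+ tow / SubSample files into the stoqs_simz_aug2013 database.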
+ +Mike McCann +MBARI 13 August + +@var __date__: Date of last svn commit +@undocumented: __doc__ parser +@status: production +@license: GPL +''' + +import os +import sys +import datetime + +parentDir = os.path.join(os.path.dirname(__file__), "../") +sys.path.insert(0, parentDir) # So that CANON is found + + +from CANON import CANONLoader + +cl = CANONLoader('stoqs_simz_aug2013', 'Sampling and Identification of Marine Zooplankton - August 2013', + description = 'Rachel Carson and Dorado surveys in Northern Monterey Bay', + x3dTerrains = { '/stoqs/static/x3d/Monterey25_256.x3d': { + 'position': '-2822317.31255 -4438600.53640 3786150.85474', + 'orientation': '0.89575 -0.31076 -0.31791 1.63772', + 'centerOfRotation': '-2711557.9403829873 -4331414.329506527 3801353.4691465236' + } + }, + grdTerrain = os.path.join(parentDir, 'Monterey25.grd') + ) + +# Aboard the Carson use zuma +##cl.tdsBase = 'http://zuma.rc.mbari.org/thredds/' +cl.tdsBase = 'http://odss.mbari.org/thredds/' # Use this on shore +cl.dodsBase = cl.tdsBase + 'dodsC/' + +# 2-second decimated dorado data +##cl.dorado_base = cl.dodsBase + 'SIMZ/2013_Aug/dorado/' +cl.dorado_base = 'http://dods.mbari.org/opendap/data/auvctd/surveys/2013/netcdf/' +cl.dorado_files = [ + 'Dorado389_2013_224_02_224_02_decim.nc', 'Dorado389_2013_225_00_225_00_decim.nc', + 'Dorado389_2013_225_01_225_01_decim.nc', 'Dorado389_2013_226_01_226_01_decim.nc', + 'Dorado389_2013_226_03_226_03_decim.nc', 'Dorado389_2013_227_00_227_00_decim.nc', + 'Dorado389_2013_227_01_227_01_decim.nc', 'Dorado389_2013_228_00_228_00_decim.nc', + 'Dorado389_2013_228_01_228_01_decim.nc', + ] +cl.dorado_parms = [ 'temperature', 'oxygen', 'nitrate', 'bbp420', 'bbp700', 'fl700_uncorr', 'salinity', 'biolume' ] + +# Spray glider - for just the duration of the campaign +cl.l_662_base = 'http://www.cencoos.org/thredds/dodsC/gliders/Line66/' +cl.l_662_files = ['OS_Glider_L_662_20130711_TS.nc'] +cl.l_662_parms = ['TEMP', 'PSAL', 'FLU2'] +cl.l_662_startDatetime = datetime.datetime(2013, 8, 10) +cl.l_662_endDatetime = datetime.datetime(2013, 8, 17) + + +# Rachel Carson Underway CTD +cl.rcuctd_base = cl.dodsBase + 'SIMZ/2013_Aug/carson/uctd/' +cl.rcuctd_files = [ + 'simz2013plm01.nc', 'simz2013plm02.nc', 'simz2013plm03.nc', 'simz2013plm04.nc', + 'simz2013plm05.nc', + ] +cl.rcuctd_parms = [ 'TEMP', 'PSAL', 'xmiss', 'wetstar' ] + +# Rachel Carson Profile CTD +cl.pctdDir = 'SIMZ/2013_Aug/carson/pctd/' +cl.rcpctd_base = cl.dodsBase + cl.pctdDir +cl.rcpctd_files = [ + 'simz2013c01.nc', 'simz2013c02.nc', 'simz2013c03.nc', 'simz2013c04.nc', + 'simz2013c05.nc', 'simz2013c06.nc', 'simz2013c07.nc', 'simz2013c08.nc', + 'simz2013c09.nc', 'simz2013c10.nc', 'simz2013c11.nc', 'simz2013c12.nc', + 'simz2013c13.nc', 'simz2013c14.nc', 'simz2013c15.nc', 'simz2013c16.nc', + 'simz2013c17.nc', 'simz2013c18.nc', + ] +cl.rcpctd_parms = [ 'TEMP', 'PSAL', 'xmiss', 'wetstar', 'oxygen' ] + +# Mooring M1 Combined file produced by DPforSSDS processing - for just the duration of the campaign +cl.m1_base = 'http://dods.mbari.org/opendap/data/ssdsdata/deployments/m1/201202/' +cl.m1_files = ['OS_M1_20120222hourly_CMSTV.nc'] +cl.m1_parms = [ 'eastward_sea_water_velocity_HR', 'northward_sea_water_velocity_HR', + 'SEA_WATER_SALINITY_HR', 'SEA_WATER_TEMPERATURE_HR', 'SW_FLUX_HR', 'AIR_TEMPERATURE_HR', + 'EASTWARD_WIND_HR', 'NORTHWARD_WIND_HR', 'WIND_SPEED_HR' + ] +cl.m1_startDatetime = datetime.datetime(2013, 8, 12) +cl.m1_endDatetime = datetime.datetime(2013, 8, 19) + +# SubSample data files received from Julio in email and copied to 
local directory +cl.subsample_csv_base = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'SIMZAug2013') +cl.subsample_csv_files = [ + '2013_Aug_SIMZ_Niskin_microscopy_STOQS.csv', + '2013_SIMZ_AUV_STOQS.csv', + '2013_SIMZ_Niskins_STOQS.csv', + '2013_SIMZ_TowNets_STOQS.csv', + ] + +# Produce parent samples file with: +# cd loaders/MolecularEcology/SIMZAug2013 +# ../../../nettow.py --database stoqs_simz_aug2013 --subsampleFile 2013_SIMZ_TowNets_STOQS.csv --csvFile 2013_SIMZ_TowNet_ParentSamples.csv -v +cl.parent_nettow_file = '2013_SIMZ_TowNet_ParentSamples.csv' + + +# Execute the load +cl.process_command_line() + +if cl.args.test: + ##cl.loadL_662(stride=100) + cl.loadDorado(stride=100) + cl.loadRCuctd(stride=10) + cl.loadRCpctd(stride=10) + cl.loadM1(stride=1) + cl.loadParentNetTowSamples() + cl.loadSubSamples() + +elif cl.args.optimal_stride: + ##cl.loadL_662(stride=1) + cl.loadDorado(stride=1) + cl.loadRCuctd(stride=1) + cl.loadRCpctd(stride=1) + cl.loadM1(stride=1) + cl.loadParentNetTowSamples() + cl.loadSubSamples() + +else: + cl.stride = cl.args.stride + ##cl.loadL_662() + cl.loadDorado() + cl.loadRCuctd() + cl.loadRCpctd() + cl.loadM1() + cl.loadParentNetTowSamples() + cl.loadSubSamples() + +# Add any X3D Terrain information specified in the constructor to the database - must be done after a load is executed +cl.addTerrainResources() + +print "All Done." + + +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" + OMERO.fs ServerMS module. + + The Server class is a wrapper to the MonitorServer. It handles the ICE + formalities. It controls the shutdown. + + Copyright 2009 University of Dundee. All rights reserved. + Use is subject to license terms supplied in LICENSE.txt + +""" +import logging +log = logging.getLogger("fsserver.MonitorServer") + +import sys +import Ice + +from omero.util import configure_server_logging + + +class Server(Ice.Application): + + """ + A fairly vanilla ICE server application. + + """ + + def run(self, args): + """ + Main method called via app.main() below. + + The Ice.Application is set to callbackOnInterrupt so that it can be + shutdown cleanly by the callback above. + + :param args: Arguments required by the ICE system. + :return: Exit state. + :rtype: int + """ + + props = self.communicator().getProperties() + configure_server_logging(props) + + try: + import fsMonitorServer + except: + log.exception("System requirements not met: \n") + return -1 + + # Create a MonitorServer, its adapter and activate it. + try: + serverIdString = self.getServerIdString(props) + serverAdapterName = self.getServerAdapterName(props) + mServer = fsMonitorServer.MonitorServerI() + adapter = self.communicator().createObjectAdapter( + serverAdapterName) + adapter.add( + mServer, self.communicator().stringToIdentity(serverIdString)) + adapter.activate() + except: + log.exception("Failed create OMERO.fs Server: \n") + return -1 + + log.info('Started OMERO.fs MonitorServer') + + # Wait for an interrupt. + self.communicator().waitForShutdown() + + log.info('Stopping OMERO.fs MonitorServer') + return 0 + + def getServerIdString(self, props): + """ + Get monitorServerIdString from the communicator properties. + + """ + return props.getPropertyWithDefault( + "omero.fs.monitorServerIdString", "") + + def getServerAdapterName(self, props): + """ + Get monitorServerAdapterName from the communicator properties. 
+ + """ + return props.getPropertyWithDefault( + "omero.fs.monitorServerAdapterName", "") + + +if __name__ == '__main__': + try: + log.info('Trying to start OMERO.fs MonitorServer') + app = Server() + except: + log.exception("Failed to start the server:\n") + log.info("Exiting with exit code: -1") + sys.exit(-1) + + exitCode = app.main(sys.argv) + log.info("Exiting with exit code: %d", exitCode) + sys.exit(exitCode) + +""" +Extracts the version of the PostgreSQL server. +""" + +import re + +# This reg-exp is intentionally fairly flexible here. +# Needs to be able to handle stuff like: +# PostgreSQL 8.3.6 +# EnterpriseDB 8.3 +# PostgreSQL 8.3 beta4 +# PostgreSQL 8.4beta1 +VERSION_RE = re.compile(r'\S+ (\d+)\.(\d+)\.?(\d+)?') + + +def _parse_version(text): + "Internal parsing method. Factored out for testing purposes." + major, major2, minor = VERSION_RE.search(text).groups() + try: + return int(major) * 10000 + int(major2) * 100 + int(minor) + except (ValueError, TypeError): + return int(major) * 10000 + int(major2) * 100 + +def get_version(connection): + """ + Returns an integer representing the major, minor and revision number of the + server. Format is the one used for the return value of libpq + PQServerVersion()/``server_version`` connection attribute (available in + newer psycopg2 versions.) + + For example, 80304 for 8.3.4. The last two digits will be 00 in the case of + releases (e.g., 80400 for 'PostgreSQL 8.4') or in the case of beta and + prereleases (e.g. 90100 for 'PostgreSQL 9.1beta2'). + + PQServerVersion()/``server_version`` doesn't execute a query so try that + first, then fallback to a ``SELECT version()`` query. + """ + if hasattr(connection, 'server_version'): + return connection.server_version + else: + cursor = connection.cursor() + cursor.execute("SELECT version()") + return _parse_version(cursor.fetchone()[0]) + +""" +Tools for the instructor dashboard +""" + + +import json +import operator + +import dateutil +import six +from django.contrib.auth.models import User +from django.http import HttpResponseBadRequest +from django.utils.translation import ugettext as _ +from edx_when import api +from opaque_keys.edx.keys import UsageKey +from pytz import UTC +from six import string_types, text_type +from six.moves import zip + +from student.models import get_user_by_username_or_email, CourseEnrollment + + +class DashboardError(Exception): + """ + Errors arising from use of the instructor dashboard. + """ + def response(self): + """ + Generate an instance of HttpResponseBadRequest for this error. + """ + error = six.text_type(self) + return HttpResponseBadRequest(json.dumps({'error': error})) + + +def handle_dashboard_error(view): + """ + Decorator which adds seamless DashboardError handling to a view. If a + DashboardError is raised during view processing, an HttpResponseBadRequest + is sent back to the client with JSON data about the error. + """ + def wrapper(request, course_id): + """ + Wrap the view. + """ + try: + return view(request, course_id=course_id) + except DashboardError as error: + return error.response() + + return wrapper + + +def strip_if_string(value): + if isinstance(value, string_types): + return value.strip() + return value + + +def get_student_from_identifier(unique_student_identifier): + """ + Gets a student object using either an email address or username. 
+ + Returns the student object associated with `unique_student_identifier` + + Raises User.DoesNotExist if no user object can be found, the user was + retired, or the user is in the process of being retired. + + DEPRECATED: use student.models.get_user_by_username_or_email instead. + """ + return get_user_by_username_or_email(unique_student_identifier) + + +def require_student_from_identifier(unique_student_identifier): + """ + Same as get_student_from_identifier() but will raise a DashboardError if + the student does not exist. + """ + try: + return get_student_from_identifier(unique_student_identifier) + except User.DoesNotExist: + raise DashboardError( + _(u"Could not find student matching identifier: {student_identifier}").format( + student_identifier=unique_student_identifier + ) + ) + + +def parse_datetime(datestr): + """ + Convert user input date string into an instance of `datetime.datetime` in + UTC. + """ + try: + return dateutil.parser.parse(datestr).replace(tzinfo=UTC) + except ValueError: + raise DashboardError(_("Unable to parse date: ") + datestr) + + +def find_unit(course, url): + """ + Finds the unit (block, module, whatever the terminology is) with the given + url in the course tree and returns the unit. Raises DashboardError if no + unit is found. + """ + def find(node, url): + """ + Find node in course tree for url. + """ + if text_type(node.location) == url: + return node + for child in node.get_children(): + found = find(child, url) + if found: + return found + return None + + unit = find(course, url) + if unit is None: + raise DashboardError(_(u"Couldn't find module for url: {0}").format(url)) + return unit + + +def get_units_with_due_date(course): + """ + Returns all top level units which have due dates. Does not return + descendents of those nodes. + """ + units = [] + + def visit(node): + """ + Visit a node. Checks to see if node has a due date and appends to + `units` if it does. Otherwise recurses into children to search for + nodes with due dates. + """ + if getattr(node, 'due', None): + units.append(node) + else: + for child in node.get_children(): + visit(child) + visit(course) + #units.sort(key=_title_or_url) + return units + + +def title_or_url(node): + """ + Returns the `display_name` attribute of the passed in node of the course + tree, if it has one. Otherwise returns the node's url. + """ + title = getattr(node, 'display_name', None) + if not title: + title = text_type(node.location) + return title + + +def set_due_date_extension(course, unit, student, due_date, actor=None, reason=''): + """ + Sets a due date extension. + + Raises: + DashboardError if the unit or extended, due date is invalid or user is + not enrolled in the course. 
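+    Illustrative call (the identifiers, date and reason below are hypothetical,
+    not taken from a real course):
+
+        unit = find_unit(course, unit_url)   # unit_url: hypothetical
+        student = require_student_from_identifier("some_username")
+        set_due_date_extension(course, unit, student,
+                               parse_datetime("2020-01-31 23:59"),
+                               actor=instructor, reason="illness")  # instructor: hypothetical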
+ """ + mode, __ = CourseEnrollment.enrollment_mode_for_user(user=student, course_id=six.text_type(course.id)) + if not mode: + raise DashboardError(_("Could not find student enrollment in the course.")) + + if due_date: + try: + api.set_date_for_block(course.id, unit.location, 'due', due_date, user=student, reason=reason, actor=actor) + except api.MissingDateError: + raise DashboardError(_(u"Unit {0} has no due date to extend.").format(unit.location)) + except api.InvalidDateError: + raise DashboardError(_("An extended due date must be later than the original due date.")) + else: + api.set_date_for_block(course.id, unit.location, 'due', None, user=student, reason=reason, actor=actor) + + +def dump_module_extensions(course, unit): + """ + Dumps data about students with due date extensions for a particular module, + specified by 'url', in a particular course. + """ + header = [_("Username"), _("Full Name"), _("Extended Due Date")] + data = [] + for username, fullname, due_date in api.get_overrides_for_block(course.id, unit.location): + due_date = due_date.strftime(u'%Y-%m-%d %H:%M') + data.append(dict(list(zip(header, (username, fullname, due_date))))) + data.sort(key=operator.itemgetter(_("Username"))) + return { + "header": header, + "title": _(u"Users with due date extensions for {0}").format( + title_or_url(unit)), + "data": data + } + + +def dump_student_extensions(course, student): + """ + Dumps data about the due date extensions granted for a particular student + in a particular course. + """ + data = [] + header = [_("Unit"), _("Extended Due Date")] + units = get_units_with_due_date(course) + units = {u.location: u for u in units} + query = api.get_overrides_for_user(course.id, student) + for override in query: + location = override['location'].replace(course_key=course.id) + if location not in units: + continue + due = override['actual_date'] + due = due.strftime(u"%Y-%m-%d %H:%M") + title = title_or_url(units[location]) + data.append(dict(list(zip(header, (title, due))))) + data.sort(key=operator.itemgetter(_("Unit"))) + return { + "header": header, + "title": _(u"Due date extensions for {0} {1} ({2})").format( + student.first_name, student.last_name, student.username), + "data": data} + + +def add_block_ids(payload): + """ + rather than manually parsing block_ids from module_ids on the client, pass the block_ids explicitly in the payload + """ + if 'data' in payload: + for ele in payload['data']: + if 'module_id' in ele: + ele['block_id'] = UsageKey.from_string(ele['module_id']).block_id + +# ------------------------------------------------------------------------------ +import cgi + +# ------------------------------------------------------------------------------ +class OdtTable: + '''This class allows to construct an ODT table programmatically. As ODT and + HTML are very similar, this class also allows to contruct an + HTML table.''' + # Some namespace definitions + tns = 'table:' + txns = 'text:' + + def __init__(self, name, paraStyle='podTablePara', cellStyle='podTableCell', + nbOfCols=1, paraHeaderStyle=None, cellHeaderStyle=None, + html=False): + # An ODT table must have a name. In the case of an HTML table, p_name + # represents the CSS class for the whole table. 
+        self.name = name
+        # The default style of every paragraph within cells
+        self.paraStyle = paraStyle
+        # The default style of every cell
+        self.cellStyle = cellStyle
+        # The total number of columns
+        self.nbOfCols = nbOfCols
+        # The default style of every paragraph within a header cell
+        self.paraHeaderStyle = paraHeaderStyle or paraStyle
+        # The default style of every header cell
+        self.cellHeaderStyle = cellHeaderStyle or 'podTableHeaderCell'
+        # The buffer where the resulting table will be rendered
+        self.res = ''
+        # Do we need to generate an HTML table instead of an ODT table ?
+        self.html = html
+
+    def dumpCell(self, content, span=1, header=False,
+                 paraStyle=None, cellStyle=None, align=None):
+        '''Dumps a cell in the table. If no specific p_paraStyle (p_cellStyle)
+           is given, self.paraStyle (self.cellStyle) is used, except when
+           p_header is True: in that case, self.paraHeaderStyle
+           (self.cellHeaderStyle) is used. p_align is used only for HTML.'''
+        if not paraStyle:
+            if header: paraStyle = self.paraHeaderStyle
+            else: paraStyle = self.paraStyle
+        if not cellStyle:
+            if header: cellStyle = self.cellHeaderStyle
+            else: cellStyle = self.cellStyle
+        if not self.html:
+            self.res += '<%stable-cell %sstyle-name="%s" ' \
+                        '%snumber-columns-spanned="%d">' % \
+                        (self.tns, self.tns, cellStyle, self.tns, span)
+            self.res += '<%sp %sstyle-name="%s">%s</%sp>' % \
+                        (self.txns, self.txns, paraStyle,
+                         cgi.escape(str(content)), self.txns)
+            self.res += '</%stable-cell>' % self.tns
+        else:
+            tag = header and 'th' or 'td'
+            palign = ''
+            if align: palign = ' align="%s"' % align
+            self.res += '<%s colspan="%d"%s>%s</%s>' % \
+                        (tag, span, palign, cgi.escape(str(content)), tag)
+
+    def startRow(self):
+        if not self.html:
+            self.res += '<%stable-row>' % self.tns
+        else:
+            self.res += '<tr>'
+
+    def endRow(self):
+        if not self.html:
+            self.res += '</%stable-row>' % self.tns
+        else:
+            self.res += '</tr>'
+
+    def startTable(self):
+        if not self.html:
+            self.res += '<%stable %sname="%s">' % (self.tns, self.tns,
+                                                   self.name)
+            self.res += '<%stable-column %snumber-columns-repeated="%d"/>' % \
+                        (self.tns, self.tns, self.nbOfCols)
+        else:
+            css = ''
+            if self.name: css = ' class="%s"' % self.name
+            self.res += '<table%s>' % css
+
+    def endTable(self):
+        if not self.html:
+            self.res += '</%stable>' % self.tns
+        else:
+            self.res += '</table>'
+
+    def dumpFloat(self, number):
+        return str(round(number, 2))
+
+    def get(self):
+        '''Returns the whole table.'''
+        if self.html:
+            return self.res
+        else:
+            return self.res.decode('utf-8')
+# ------------------------------------------------------------------------------
+
+from django.test import TestCase
+from django.utils import timezone
+from principal.models import Proyecto, Comite
+
+# Create your tests here.
+
+def crear_proyecto(nombre, descripcion, fecha_creacion, complejidad_total, estado):
+    """
+    Creates a Proyecto (project) for use in the tests.
+    """
+    return Proyecto.objects.create(nombre=nombre, descripcion=descripcion,
+                                   fecha_creacion=fecha_creacion,
+                                   complejidad_total=complejidad_total,
+                                   estado=estado
+                                   )
+
+def crear_comite(proyecto):
+    """
+    Creates a Comite (committee) for use in the tests.
+    """
+    return Comite.objects.create(proyecto=proyecto)
+
+class ComiteTest(TestCase):
+
+    def test_creacion_comite(self):
+        """
+        Checks that the committee is created successfully.
+        """
+        tproyecto = crear_proyecto("Proyecto1", "Descripcion1", timezone.now(), 0, "no iniciado")
+        tproyecto.save()
+        tproyecto_id = tproyecto.id
+        tcomite = crear_comite(tproyecto)
+        tcomite.save()
+        tcomiteproyecto_id = tcomite.proyecto.id
+        self.assertEqual(tcomiteproyecto_id, tproyecto_id)
+
+    def test_eliminacion_comite(self):
+        """
+        Checks that deleting the project also deletes the committee
+        associated with it.
+        """
+        tproyecto = crear_proyecto("Proyecto1", "Descripcion1", timezone.now(), 0, "no iniciado")
+        tproyecto.save()
+        tcomite = crear_comite(tproyecto)
+        tcomite.save()
+        tcomite_id = tcomite.id
+
+        tproyecto.delete()
+        tcomite = Comite.objects.all()
+        self.assertEqual(tcomite.count(), 0)
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['preview'],
+                    'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+---
+module: ec2_asg_facts
+short_description: Gather facts about ec2 Auto Scaling Groups (ASGs) in AWS
+description:
+    - Gather facts about ec2 Auto Scaling Groups (ASGs) in AWS
+version_added: "2.2"
+requirements: [ boto3 ]
+author: "Rob White (@wimnat)"
+options:
+  name:
+    description:
+      - The prefix or name of the auto scaling group(s) you are searching for.
+      - "Note: This is a regular expression match with implicit '^' (beginning of string). Append '$' for a complete name match."
+    required: false
+  tags:
+    description:
+      - >
+        A dictionary/hash of tags in the format { tag1_name: 'tag1_value', tag2_name: 'tag2_value' } to match against the auto scaling
+        group(s) you are searching for.
+    required: false
+extends_documentation_fragment:
+    - aws
+    - ec2
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+ +# Find all groups +- ec2_asg_facts: + register: asgs + +# Find a group with matching name/prefix +- ec2_asg_facts: + name: public-webserver-asg + register: asgs + +# Find a group with matching tags +- ec2_asg_facts: + tags: + project: webapp + env: production + register: asgs + +# Find a group with matching name/prefix and tags +- ec2_asg_facts: + name: myproject + tags: + env: production + register: asgs + +# Fail if no groups are found +- ec2_asg_facts: + name: public-webserver-asg + register: asgs + failed_when: "{{ asgs.results | length == 0 }}" + +# Fail if more than 1 group is found +- ec2_asg_facts: + name: public-webserver-asg + register: asgs + failed_when: "{{ asgs.results | length > 1 }}" +''' + +RETURN = ''' +--- +auto_scaling_group_arn: + description: The Amazon Resource Name of the ASG + returned: success + type: string + sample: "arn:aws:autoscaling:us-west-2:1234567890:autoScalingGroup:10787c52-0bcb-427d-82ba-c8e4b008ed2e:autoScalingGroupName/public-webapp-production-1" +auto_scaling_group_name: + description: Name of autoscaling group + returned: success + type: str + sample: "public-webapp-production-1" +availability_zones: + description: List of Availability Zones that are enabled for this ASG. + returned: success + type: list + sample: ["us-west-2a", "us-west-2b", "us-west-2a"] +created_time: + description: The date and time this ASG was created, in ISO 8601 format. + returned: success + type: string + sample: "2015-11-25T00:05:36.309Z" +default_cooldown: + description: The default cooldown time in seconds. + returned: success + type: int + sample: 300 +desired_capacity: + description: The number of EC2 instances that should be running in this group. + returned: success + type: int + sample: 3 +health_check_period: + description: Length of time in seconds after a new EC2 instance comes into service that Auto Scaling starts checking its health. + returned: success + type: int + sample: 30 +health_check_type: + description: The service you want the health status from, one of "EC2" or "ELB". + returned: success + type: str + sample: "ELB" +instances: + description: List of EC2 instances and their status as it relates to the ASG. + returned: success + type: list + sample: [ + { + "availability_zone": "us-west-2a", + "health_status": "Healthy", + "instance_id": "i-es22ad25", + "launch_configuration_name": "public-webapp-production-1", + "lifecycle_state": "InService", + "protected_from_scale_in": "false" + } + ] +launch_config_name: + description: > + Name of launch configuration associated with the ASG. Same as launch_configuration_name, + provided for compatibility with ec2_asg module. + returned: success + type: str + sample: "public-webapp-production-1" +launch_configuration_name: + description: Name of launch configuration associated with the ASG. + returned: success + type: str + sample: "public-webapp-production-1" +load_balancer_names: + description: List of load balancers names attached to the ASG. + returned: success + type: list + sample: ["elb-webapp-prod"] +max_size: + description: Maximum size of group + returned: success + type: int + sample: 3 +min_size: + description: Minimum size of group + returned: success + type: int + sample: 1 +new_instances_protected_from_scale_in: + description: Whether or not new instances a protected from automatic scaling in. + returned: success + type: boolean + sample: "false" +placement_group: + description: Placement group into which instances are launched, if any. 
+ returned: success + type: str + sample: None +status: + description: The current state of the group when DeleteAutoScalingGroup is in progress. + returned: success + type: str + sample: None +tags: + description: List of tags for the ASG, and whether or not each tag propagates to instances at launch. + returned: success + type: list + sample: [ + { + "key": "Name", + "value": "public-webapp-production-1", + "resource_id": "public-webapp-production-1", + "resource_type": "auto-scaling-group", + "propagate_at_launch": "true" + }, + { + "key": "env", + "value": "production", + "resource_id": "public-webapp-production-1", + "resource_type": "auto-scaling-group", + "propagate_at_launch": "true" + } + ] +target_group_arns: + description: List of ARNs of the target groups that the ASG populates + returned: success + type: list + sample: [ + "arn:aws:elasticloadbalancing:ap-southeast-2:123456789012:targetgroup/target-group-host-hello/1a2b3c4d5e6f1a2b", + "arn:aws:elasticloadbalancing:ap-southeast-2:123456789012:targetgroup/target-group-path-world/abcd1234abcd1234" + ] +target_group_names: + description: List of names of the target groups that the ASG populates + returned: success + type: list + sample: [ + "target-group-host-hello", + "target-group-path-world" + ] +termination_policies: + description: A list of termination policies for the group. + returned: success + type: str + sample: ["Default"] +''' + +import re + +try: + from botocore.exceptions import ClientError +except ImportError: + pass # caught by imported HAS_BOTO3 + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.ec2 import (get_aws_connection_info, boto3_conn, ec2_argument_spec, + camel_dict_to_snake_dict, HAS_BOTO3) + + +def match_asg_tags(tags_to_match, asg): + for key, value in tags_to_match.items(): + for tag in asg['Tags']: + if key == tag['Key'] and value == tag['Value']: + break + else: + return False + return True + + +def find_asgs(conn, module, name=None, tags=None): + """ + Args: + conn (boto3.AutoScaling.Client): Valid Boto3 ASG client. + name (str): Optional name of the ASG you are looking for. + tags (dict): Optional dictionary of tags and values to search for. 
+ + Basic Usage: + >>> name = 'public-webapp-production' + >>> tags = { 'env': 'production' } + >>> conn = boto3.client('autoscaling', region_name='us-west-2') + >>> results = find_asgs(name, conn) + + Returns: + List + [ + { + "auto_scaling_group_arn": ( + "arn:aws:autoscaling:us-west-2:275977225706:autoScalingGroup:58abc686-9783-4528-b338-3ad6f1cbbbaf:" + "autoScalingGroupName/public-webapp-production" + ), + "auto_scaling_group_name": "public-webapp-production", + "availability_zones": ["us-west-2c", "us-west-2b", "us-west-2a"], + "created_time": "2016-02-02T23:28:42.481000+00:00", + "default_cooldown": 300, + "desired_capacity": 2, + "enabled_metrics": [], + "health_check_grace_period": 300, + "health_check_type": "ELB", + "instances": + [ + { + "availability_zone": "us-west-2c", + "health_status": "Healthy", + "instance_id": "i-047a12cb", + "launch_configuration_name": "public-webapp-production-1", + "lifecycle_state": "InService", + "protected_from_scale_in": false + }, + { + "availability_zone": "us-west-2a", + "health_status": "Healthy", + "instance_id": "i-7a29df2c", + "launch_configuration_name": "public-webapp-production-1", + "lifecycle_state": "InService", + "protected_from_scale_in": false + } + ], + "launch_config_name": "public-webapp-production-1", + "launch_configuration_name": "public-webapp-production-1", + "load_balancer_names": ["public-webapp-production-lb"], + "max_size": 4, + "min_size": 2, + "new_instances_protected_from_scale_in": false, + "placement_group": None, + "status": None, + "suspended_processes": [], + "tags": + [ + { + "key": "Name", + "propagate_at_launch": true, + "resource_id": "public-webapp-production", + "resource_type": "auto-scaling-group", + "value": "public-webapp-production" + }, + { + "key": "env", + "propagate_at_launch": true, + "resource_id": "public-webapp-production", + "resource_type": "auto-scaling-group", + "value": "production" + } + ], + "target_group_names": [], + "target_group_arns": [], + "termination_policies": + [ + "Default" + ], + "vpc_zone_identifier": + [ + "subnet-a1b1c1d1", + "subnet-a2b2c2d2", + "subnet-a3b3c3d3" + ] + } + ] + """ + + try: + asgs_paginator = conn.get_paginator('describe_auto_scaling_groups') + asgs = asgs_paginator.paginate().build_full_result() + except ClientError as e: + module.fail_json(msg=e.message, **camel_dict_to_snake_dict(e.response)) + + if not asgs: + return asgs + try: + region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True) + elbv2 = boto3_conn(module, conn_type='client', resource='elbv2', region=region, endpoint=ec2_url, **aws_connect_kwargs) + except ClientError as e: + # This is nice to have, not essential + elbv2 = None + matched_asgs = [] + + if name is not None: + # if the user didn't specify a name + name_prog = re.compile(r'^' + name) + + for asg in asgs['AutoScalingGroups']: + if name: + matched_name = name_prog.search(asg['AutoScalingGroupName']) + else: + matched_name = True + + if tags: + matched_tags = match_asg_tags(tags, asg) + else: + matched_tags = True + + if matched_name and matched_tags: + asg = camel_dict_to_snake_dict(asg) + # compatibility with ec2_asg module + asg['launch_config_name'] = asg['launch_configuration_name'] + # workaround for https://github.com/ansible/ansible/pull/25015 + if 'target_group_ar_ns' in asg: + asg['target_group_arns'] = asg['target_group_ar_ns'] + del(asg['target_group_ar_ns']) + if asg.get('target_group_arns'): + if elbv2: + try: + tg_paginator = elbv2.get_paginator('describe_target_groups') + tg_result = 
tg_paginator.paginate(TargetGroupArns=asg['target_group_arns']).build_full_result() + asg['target_group_names'] = [tg['TargetGroupName'] for tg in tg_result['TargetGroups']] + except ClientError as e: + if e.response['Error']['Code'] == 'TargetGroupNotFound': + asg['target_group_names'] = [] + else: + asg['target_group_names'] = [] + matched_asgs.append(asg) + + return matched_asgs + + +def main(): + + argument_spec = ec2_argument_spec() + argument_spec.update( + dict( + name=dict(type='str'), + tags=dict(type='dict'), + ) + ) + module = AnsibleModule(argument_spec=argument_spec) + + if not HAS_BOTO3: + module.fail_json(msg='boto3 required for this module') + + asg_name = module.params.get('name') + asg_tags = module.params.get('tags') + + try: + region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True) + autoscaling = boto3_conn(module, conn_type='client', resource='autoscaling', region=region, endpoint=ec2_url, **aws_connect_kwargs) + except ClientError as e: + module.fail_json(msg=e.message, **camel_dict_to_snake_dict(e.response)) + + results = find_asgs(autoscaling, module, name=asg_name, tags=asg_tags) + module.exit_json(results=results) + + +if __name__ == '__main__': + main() + +# Copyright (c) 2014 Rackspace Hosting +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
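+
+# Illustrative sketch of the proxying behaviour these tests pin down (the
+# "session" below is assumed to be a real XenAPI session wrapper; it is not
+# defined in this file):
+#
+#     vm = objects.VM(session)
+#     vm.get_X("ref")   # forwards to session.call_xenapi("VM.get_X", "ref")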
+ +import mock + +from nova.tests.unit.virt.xenapi import stubs +from nova import utils +from nova.virt.xenapi.client import objects + + +class XenAPISessionObjectTestCase(stubs.XenAPITestBaseNoDB): + def setUp(self): + super(XenAPISessionObjectTestCase, self).setUp() + self.session = mock.Mock() + self.obj = objects.XenAPISessionObject(self.session, "FAKE") + + def test_call_method_via_attr(self): + self.session.call_xenapi.return_value = "asdf" + + result = self.obj.get_X("ref") + + self.assertEqual(result, "asdf") + self.session.call_xenapi.assert_called_once_with("FAKE.get_X", "ref") + + +class ObjectsTestCase(stubs.XenAPITestBaseNoDB): + def setUp(self): + super(ObjectsTestCase, self).setUp() + self.session = mock.Mock() + + def test_VM(self): + vm = objects.VM(self.session) + vm.get_X("ref") + self.session.call_xenapi.assert_called_once_with("VM.get_X", "ref") + + def test_SR(self): + sr = objects.SR(self.session) + sr.get_X("ref") + self.session.call_xenapi.assert_called_once_with("SR.get_X", "ref") + + def test_VDI(self): + vdi = objects.VDI(self.session) + vdi.get_X("ref") + self.session.call_xenapi.assert_called_once_with("VDI.get_X", "ref") + + def test_VBD(self): + vbd = objects.VBD(self.session) + vbd.get_X("ref") + self.session.call_xenapi.assert_called_once_with("VBD.get_X", "ref") + + def test_PBD(self): + pbd = objects.PBD(self.session) + pbd.get_X("ref") + self.session.call_xenapi.assert_called_once_with("PBD.get_X", "ref") + + def test_PIF(self): + pif = objects.PIF(self.session) + pif.get_X("ref") + self.session.call_xenapi.assert_called_once_with("PIF.get_X", "ref") + + def test_VLAN(self): + vlan = objects.VLAN(self.session) + vlan.get_X("ref") + self.session.call_xenapi.assert_called_once_with("VLAN.get_X", "ref") + + def test_host(self): + host = objects.Host(self.session) + host.get_X("ref") + self.session.call_xenapi.assert_called_once_with("host.get_X", "ref") + + def test_network(self): + network = objects.Network(self.session) + network.get_X("ref") + self.session.call_xenapi.assert_called_once_with("network.get_X", + "ref") + + def test_pool(self): + pool = objects.Pool(self.session) + pool.get_X("ref") + self.session.call_xenapi.assert_called_once_with("pool.get_X", "ref") + + +class VBDTestCase(stubs.XenAPITestBaseNoDB): + def setUp(self): + super(VBDTestCase, self).setUp() + self.session = mock.Mock() + self.session.VBD = objects.VBD(self.session) + + def test_plug(self): + self.session.VBD.plug("vbd_ref", "vm_ref") + self.session.call_xenapi.assert_called_once_with("VBD.plug", "vbd_ref") + + def test_unplug(self): + self.session.VBD.unplug("vbd_ref", "vm_ref") + self.session.call_xenapi.assert_called_once_with("VBD.unplug", + "vbd_ref") + + @mock.patch.object(utils, 'synchronized') + def test_vbd_plug_check_synchronized(self, mock_synchronized): + self.session.VBD.unplug("vbd_ref", "vm_ref") + mock_synchronized.assert_called_once_with("xenapi-vbd-vm_ref") + +#!/usr/bin/env python +# Copyright (c) 2011 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. 
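+
+# Worked example of the rewriting this script performs (a sketch; the flag
+# values are hypothetical): with --sysroot=/build/sysroot --strip-prefix=/usr,
+#
+#     -I/usr/include/glib-2.0   ->   -I/build/sysroot/include/glib-2.0
+#     -L/lib                    ->   -L/build/sysroot/lib
+#
+# Relative paths and absolute paths already under the sysroot are left as-is.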
+ +"""Rewrites paths in -I, -L and other option to be relative to a sysroot.""" + +import sys +import os +import optparse + +REWRITE_PREFIX = ['-I', + '-idirafter', + '-imacros', + '-imultilib', + '-include', + '-iprefix', + '-iquote', + '-isystem', + '-L'] + +def RewritePath(path, opts): + """Rewrites a path by stripping the prefix and prepending the sysroot.""" + sysroot = opts.sysroot + prefix = opts.strip_prefix + if os.path.isabs(path) and not path.startswith(sysroot): + if path.startswith(prefix): + path = path[len(prefix):] + path = path.lstrip('/') + return os.path.join(sysroot, path) + else: + return path + + +def RewriteLine(line, opts): + """Rewrites all the paths in recognized options.""" + args = line.split() + count = len(args) + i = 0 + while i < count: + for prefix in REWRITE_PREFIX: + # The option can be either in the form "-I /path/to/dir" or + # "-I/path/to/dir" so handle both. + if args[i] == prefix: + i += 1 + try: + args[i] = RewritePath(args[i], opts) + except IndexError: + sys.stderr.write('Missing argument following %s\n' % prefix) + break + elif args[i].startswith(prefix): + args[i] = prefix + RewritePath(args[i][len(prefix):], opts) + i += 1 + + return ' '.join(args) + + +def main(argv): + parser = optparse.OptionParser() + parser.add_option('-s', '--sysroot', default='/', help='sysroot to prepend') + parser.add_option('-p', '--strip-prefix', default='', help='prefix to strip') + opts, args = parser.parse_args(argv[1:]) + + for line in sys.stdin.readlines(): + line = RewriteLine(line.strip(), opts) + print line + return 0 + + +if __name__ == '__main__': + sys.exit(main(sys.argv)) + +from __future__ import print_function + +import urlparse +import mimetypes +from StringIO import StringIO + +from django.conf import settings +from django.core.cache import cache +from django.utils.text import force_unicode +from django.core.files.storage import Storage +from django.http import HttpResponse, HttpResponseNotFound +from django.core.exceptions import ImproperlyConfigured + +try: + import mogilefs +except ImportError: + raise ImproperlyConfigured("Could not load mogilefs dependency.\ + \nSee http://mogilefs.pbworks.com/Client-Libraries") + + +class MogileFSStorage(Storage): + """MogileFS filesystem storage""" + def __init__(self, base_url=settings.MEDIA_URL): + + # the MOGILEFS_MEDIA_URL overrides MEDIA_URL + if hasattr(settings, 'MOGILEFS_MEDIA_URL'): + self.base_url = settings.MOGILEFS_MEDIA_URL + else: + self.base_url = base_url + + for var in ('MOGILEFS_TRACKERS', 'MOGILEFS_DOMAIN',): + if not hasattr(settings, var): + raise ImproperlyConfigured("You must define %s to use the MogileFS backend." 
% var) + + self.trackers = settings.MOGILEFS_TRACKERS + self.domain = settings.MOGILEFS_DOMAIN + self.client = mogilefs.Client(self.domain, self.trackers) + + def get_mogile_paths(self, filename): + return self.client.get_paths(filename) + + # The following methods define the Backend API + + def filesize(self, filename): + raise NotImplemented + #return os.path.getsize(self._get_absolute_path(filename)) + + def path(self, filename): + paths = self.get_mogile_paths(filename) + if paths: + return self.get_mogile_paths(filename)[0] + else: + return None + + def url(self, filename): + return urlparse.urljoin(self.base_url, filename).replace('\\', '/') + + def open(self, filename, mode='rb'): + raise NotImplemented + #return open(self._get_absolute_path(filename), mode) + + def exists(self, filename): + return filename in self.client + + def save(self, filename, raw_contents): + filename = self.get_available_filename(filename) + + if not hasattr(self, 'mogile_class'): + self.mogile_class = None + + # Write the file to mogile + success = self.client.send_file(filename, StringIO(raw_contents), self.mogile_class) + if success: + print("Wrote file to key %s, %s@%s" % (filename, self.domain, self.trackers[0])) + else: + print("FAILURE writing file %s" % (filename)) + + return force_unicode(filename.replace('\\', '/')) + + def delete(self, filename): + + self.client.delete(filename) + + +def serve_mogilefs_file(request, key=None): + """ + Called when a user requests an image. + Either reproxy the path to perlbal, or serve the image outright + """ + # not the best way to do this, since we create a client each time + mimetype = mimetypes.guess_type(key)[0] or "application/x-octet-stream" + client = mogilefs.Client(settings.MOGILEFS_DOMAIN, settings.MOGILEFS_TRACKERS) + if hasattr(settings, "SERVE_WITH_PERLBAL") and settings.SERVE_WITH_PERLBAL: + # we're reproxying with perlbal + + # check the path cache + + path = cache.get(key) + + if not path: + path = client.get_paths(key) + cache.set(key, path, 60) + + if path: + response = HttpResponse(content_type=mimetype) + response['X-REPROXY-URL'] = path[0] + else: + response = HttpResponseNotFound() + + else: + # we don't have perlbal, let's just serve the image via django + file_data = client[key] + if file_data: + response = HttpResponse(file_data, mimetype=mimetype) + else: + response = HttpResponseNotFound() + + return response + +######################## BEGIN LICENSE BLOCK ######################## +# The Original Code is Mozilla Universal charset detector code. +# +# The Initial Developer of the Original Code is +# Simon Montagu +# Portions created by the Initial Developer are Copyright (C) 2005 +# the Initial Developer. All Rights Reserved. +# +# Contributor(s): +# Mark Pilgrim - port to Python +# Shy Shalom - original C code +# Shoshannah Forbes - original C code (?) +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. 
+# +# You should have received a copy of the GNU Lesser General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA +# 02110-1301 USA +######################### END LICENSE BLOCK ######################### + +# 255: Control characters that usually does not exist in any text +# 254: Carriage/Return +# 253: symbol (punctuation) that does not belong to word +# 252: 0 - 9 + +# Windows-1255 language model +# Character Mapping Table: +win1255_CharToOrderMap = ( +255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00 +255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10 +253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20 +252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30 +253, 69, 91, 79, 80, 92, 89, 97, 90, 68,111,112, 82, 73, 95, 85, # 40 + 78,121, 86, 71, 67,102,107, 84,114,103,115,253,253,253,253,253, # 50 +253, 50, 74, 60, 61, 42, 76, 70, 64, 53,105, 93, 56, 65, 54, 49, # 60 + 66,110, 51, 43, 44, 63, 81, 77, 98, 75,108,253,253,253,253,253, # 70 +124,202,203,204,205, 40, 58,206,207,208,209,210,211,212,213,214, +215, 83, 52, 47, 46, 72, 32, 94,216,113,217,109,218,219,220,221, + 34,116,222,118,100,223,224,117,119,104,125,225,226, 87, 99,227, +106,122,123,228, 55,229,230,101,231,232,120,233, 48, 39, 57,234, + 30, 59, 41, 88, 33, 37, 36, 31, 29, 35,235, 62, 28,236,126,237, +238, 38, 45,239,240,241,242,243,127,244,245,246,247,248,249,250, + 9, 8, 20, 16, 3, 2, 24, 14, 22, 1, 25, 15, 4, 11, 6, 23, + 12, 19, 13, 26, 18, 27, 21, 17, 7, 10, 5,251,252,128, 96,253, +) + +# Model Table: +# total sequences: 100% +# first 512 sequences: 98.4004% +# first 1024 sequences: 1.5981% +# rest sequences: 0.087% +# negative sequences: 0.0015% +HebrewLangModel = ( +0,3,3,3,3,3,3,3,3,3,3,2,3,3,3,3,3,3,3,3,3,3,3,2,3,2,1,2,0,1,0,0, +3,0,3,1,0,0,1,3,2,0,1,1,2,0,2,2,2,1,1,1,1,2,1,1,1,2,0,0,2,2,0,1, +3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,2,2, +1,2,1,2,1,2,0,0,2,0,0,0,0,0,1,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0, +3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,2, +1,2,1,3,1,1,0,0,2,0,0,0,1,0,1,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0, +3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,1,0,1,2,2,1,3, +1,2,1,1,2,2,0,0,2,2,0,0,0,0,1,0,1,0,0,0,1,0,0,0,0,0,0,1,0,1,1,0, +3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,2,2,2,2,3,2, +1,2,1,2,2,2,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0, +3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,2,3,2,2,3,2,2,2,1,2,2,2,2, +1,2,1,1,2,2,0,1,2,0,0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0, +3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,0,2,2,2,2,2, +0,2,0,2,2,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0, +3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,0,2,2,2, +0,2,1,2,2,2,0,0,2,1,0,0,0,0,1,0,1,0,0,0,0,0,0,2,0,0,0,0,0,0,1,0, +3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,3,2,1,2,3,2,2,2, +1,2,1,2,2,2,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,1,0, +3,3,3,3,3,3,3,3,3,2,3,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,3,1,0,2,0,2, +0,2,1,2,2,2,0,0,1,2,0,0,0,0,1,0,1,0,0,0,0,0,0,1,0,0,0,2,0,0,1,0, +3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,2,3,2,2,3,2,1,2,1,1,1, +0,1,1,1,1,1,3,0,1,0,0,0,0,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0, +3,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,0,1,1,0,0,1,0,0,1,0,0,0,0, +0,0,1,0,0,0,0,0,2,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,2,2,2,2,2, +0,2,0,1,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0, 
+3,3,3,3,3,3,3,3,3,2,3,3,3,2,1,2,3,3,2,3,3,3,3,2,3,2,1,2,0,2,1,2, +0,2,0,2,2,2,0,0,1,2,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,0,1,0,0,1,0, +3,3,3,3,3,3,3,3,3,2,3,3,3,1,2,2,3,3,2,3,2,3,2,2,3,1,2,2,0,2,2,2, +0,2,1,2,2,2,0,0,1,2,0,0,0,0,1,0,0,0,0,0,1,0,0,1,0,0,0,1,0,0,1,0, +3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,2,3,3,2,2,2,3,3,3,3,1,3,2,2,2, +0,2,0,1,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0, +3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,3,3,3,2,3,2,2,2,1,2,2,0,2,2,2,2, +0,2,0,2,2,2,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0, +3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,1,3,2,3,3,2,3,3,2,2,1,2,2,2,2,2,2, +0,2,1,2,1,2,0,0,1,0,0,0,0,0,1,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,1,0, +3,3,3,3,3,3,2,3,2,3,3,2,3,3,3,3,2,3,2,3,3,3,3,3,2,2,2,2,2,2,2,1, +0,2,0,1,2,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0, +3,3,3,3,3,3,3,3,3,2,1,2,3,3,3,3,3,3,3,2,3,2,3,2,1,2,3,0,2,1,2,2, +0,2,1,1,2,1,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,2,0, +3,3,3,3,3,3,3,3,3,2,3,3,3,3,2,1,3,1,2,2,2,1,2,3,3,1,2,1,2,2,2,2, +0,1,1,1,1,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,2,0,0,0,0,0,0,0,0, +3,3,3,3,3,3,3,3,3,3,0,2,3,3,3,1,3,3,3,1,2,2,2,2,1,1,2,2,2,2,2,2, +0,2,0,1,1,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0, +3,3,3,3,3,3,2,3,3,3,2,2,3,3,3,2,1,2,3,2,3,2,2,2,2,1,2,1,1,1,2,2, +0,2,1,1,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0, +3,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,0,0,0,1,0,0,0,0,0, +1,0,1,0,0,0,0,0,2,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +3,3,3,3,3,2,3,3,2,3,1,2,2,2,2,3,2,3,1,1,2,2,1,2,2,1,1,0,2,2,2,2, +0,1,0,1,2,2,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0, +3,0,0,1,1,0,1,0,0,1,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,2,2,0, +0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +3,0,1,0,1,0,1,1,0,1,1,0,0,0,1,1,0,1,1,1,0,0,0,0,0,0,1,0,0,0,0,0, +0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +3,0,0,0,1,1,0,1,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0, +0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0, +3,2,2,1,2,2,2,2,2,2,2,1,2,2,1,2,2,1,1,1,1,1,1,1,1,2,1,1,0,3,3,3, +0,3,0,2,2,2,2,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0, +2,2,2,3,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,2,2,1,2,2,2,1,1,1,2,0,1, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +2,2,2,2,2,2,2,2,2,2,2,1,2,2,2,2,2,2,2,2,2,2,2,0,2,2,0,0,0,0,0,0, +0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +2,3,1,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,2,1,0,2,1,0, +0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +3,1,1,1,1,1,1,1,1,1,1,0,0,1,1,1,1,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0, +0,3,1,1,2,2,2,2,2,1,2,2,2,1,1,2,2,2,2,2,2,2,1,2,2,1,0,1,1,1,1,0, +0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +3,2,1,1,1,1,2,1,1,2,1,0,1,1,1,1,1,1,1,1,1,1,1,0,1,0,0,0,0,0,0,0, +0,0,2,0,0,0,0,0,0,0,0,1,1,0,0,0,0,1,1,0,0,1,1,0,0,0,0,0,0,1,0,0, +2,1,1,2,2,2,2,2,2,2,2,2,2,2,1,2,2,2,2,2,1,2,1,2,1,1,1,1,0,0,0,0, +0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +1,2,1,2,2,2,2,2,2,2,2,2,2,1,2,1,2,1,1,2,1,1,1,2,1,2,1,2,0,1,0,1, +0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,3,1,2,2,2,1,2,2,2,2,2,2,2,2,1,2,1,1,1,1,1,1,2,1,2,1,1,0,1,0,1, +0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +2,1,2,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,2,2, +0,2,0,1,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0, +3,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0, 
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +2,1,1,1,1,1,1,1,0,1,1,0,1,0,0,1,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0, +0,0,0,0,0,0,0,0,2,0,1,1,1,0,1,0,0,0,1,1,0,1,1,0,0,0,0,0,1,1,0,0, +0,1,1,1,2,1,2,2,2,0,2,0,2,0,1,1,2,1,1,1,1,2,1,0,1,1,0,0,0,0,0,0, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0, +1,0,1,0,0,0,0,0,1,0,1,2,2,0,1,0,0,1,1,2,2,1,2,0,2,0,0,0,1,2,0,1, +2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,0,0,0,0,0,0,0,2,0,2,1,2,0,2,0,0,1,1,1,1,1,1,0,1,0,0,0,1,0,0,1, +2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,0,1,0,0,0,0,0,1,0,2,1,1,0,1,0,0,1,1,1,2,2,0,0,1,0,0,0,1,0,0,1, +1,1,2,1,0,1,1,1,0,1,0,1,1,1,1,0,0,0,1,0,1,0,0,0,0,0,0,0,0,2,2,1, +0,2,0,1,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +2,1,0,0,1,0,1,1,1,1,0,0,0,0,0,1,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0, +0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +1,1,1,1,1,1,1,1,1,2,1,0,1,1,1,1,1,1,1,1,1,1,1,0,1,0,0,0,0,0,0,0, +0,0,0,0,0,0,0,0,0,0,1,1,1,0,0,0,0,1,1,1,0,1,1,0,1,0,0,0,1,1,0,1, +2,0,1,0,1,0,1,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,0,0,0,0,0,0,0,1,0,1,1,1,0,1,0,0,1,1,2,1,1,2,0,1,0,0,0,1,1,0,1, +1,0,0,1,0,0,1,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,0,0,0,0,0,0,0,1,0,1,1,2,0,1,0,0,0,0,2,1,1,2,0,2,0,0,0,1,1,0,1, +1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,0,0,0,0,0,0,0,1,0,2,1,1,0,1,0,0,2,2,1,2,1,1,0,1,0,0,0,1,1,0,1, +2,0,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,0,0,0,0,0,0,0,0,0,1,2,2,0,0,0,0,0,1,1,0,1,0,0,1,0,0,0,0,1,0,1, +1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,0,0,0,0,0,0,0,0,0,1,2,2,0,0,0,0,2,1,1,1,0,2,1,1,0,0,0,2,1,0,1, +1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,0,0,0,0,0,0,0,1,0,1,1,2,0,1,0,0,1,1,0,2,1,1,0,1,0,0,0,1,1,0,1, +2,2,1,1,1,0,1,1,0,1,1,0,1,0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0, +0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,0,0,0,0,0,0,0,1,0,2,1,1,0,1,0,0,1,1,0,1,2,1,0,2,0,0,0,1,1,0,1, +2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0, +0,1,0,0,2,0,2,1,1,0,1,0,1,0,0,1,0,0,0,0,1,0,0,0,1,0,0,0,0,0,1,0, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +1,0,0,1,0,0,1,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,0,0,0,0,0,0,0,1,0,1,1,2,0,1,0,0,1,1,1,0,1,0,0,1,0,0,0,1,0,0,1, +1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +1,0,0,0,0,0,0,0,1,0,1,1,0,0,1,0,0,2,1,1,1,1,1,0,1,0,0,0,0,1,0,1, +0,1,1,1,2,1,1,1,1,0,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,0,0,0,0,0,0,0, +0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, +0,0,0,0,0,0,0,0,0,0,1,2,1,0,0,0,0,0,1,1,1,1,1,0,1,0,0,0,1,1,0,0, +) + +Win1255HebrewModel = { + 'charToOrderMap': win1255_CharToOrderMap, + 'precedenceMatrix': HebrewLangModel, + 'mTypicalPositiveRatio': 0.984004, + 'keepEnglishLetter': False, + 'charsetName': "windows-1255" +} + +# flake8: noqa + +# Copyright 2012 United States Government as represented by the +# Administrator of 
the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Copyright 2012 OpenStack Foundation +# Copyright 2012 Nebula, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import futurist + +from django.conf import settings +from django.core.urlresolvers import reverse +from django.core.urlresolvers import reverse_lazy +from django.utils.translation import ugettext_lazy as _ + +from horizon import exceptions +from horizon import forms +from horizon import tables +from horizon.utils import memoized + +from openstack_dashboard import api + +from openstack_dashboard.dashboards.admin.instances \ + import forms as project_forms +from openstack_dashboard.dashboards.admin.instances \ + import tables as project_tables +from openstack_dashboard.dashboards.admin.instances import tabs +from openstack_dashboard.dashboards.project.instances import views +from openstack_dashboard.dashboards.project.instances.workflows \ + import update_instance + + +# re-use console from project.instances.views to make reflection work +def console(args, **kvargs): + return views.console(args, **kvargs) + + +# re-use vnc from project.instances.views to make reflection work +def vnc(args, **kvargs): + return views.vnc(args, **kvargs) + + +# re-use spice from project.instances.views to make reflection work +def spice(args, **kvargs): + return views.spice(args, **kvargs) + + +# re-use rdp from project.instances.views to make reflection work +def rdp(args, **kvargs): + return views.rdp(args, **kvargs) + + +# re-use get_resource_id_by_name from project.instances.views +def swap_filter(resources, filters, fake_field, real_field): + return views.swap_filter(resources, filters, fake_field, real_field) + + +class AdminUpdateView(views.UpdateView): + workflow_class = update_instance.AdminUpdateInstance + success_url = reverse_lazy("horizon:admin:instances:index") + + +class AdminIndexView(tables.DataTableView): + table_class = project_tables.AdminInstancesTable + page_title = _("Instances") + + def has_more_data(self, table): + return self._more + + def needs_filter_first(self, table): + return self._needs_filter_first + + def get_data(self): + instances = [] + tenants = [] + tenant_dict = {} + images = [] + flavors = [] + full_flavors = {} + + marker = self.request.GET.get( + project_tables.AdminInstancesTable._meta.pagination_param, None) + default_search_opts = {'marker': marker, + 'paginate': True, + 'all_tenants': True} + + search_opts = self.get_filters(default_search_opts.copy()) + + # If filter_first is set and if there are not other filters + # selected, then search criteria must be provided and return an empty + # list + filter_first = getattr(settings, 'FILTER_DATA_FIRST', {}) + if filter_first.get('admin.instances', False) and \ + len(search_opts) == len(default_search_opts): + self._needs_filter_first = True + self._more = False + return instances + + self._needs_filter_first = False + + def _task_get_tenants(): + # Gather our tenants to correlate against IDs + try: + tmp_tenants, __ = 
api.keystone.tenant_list(self.request) + tenants.extend(tmp_tenants) + tenant_dict.update([(t.id, t) for t in tenants]) + except Exception: + msg = _('Unable to retrieve instance project information.') + exceptions.handle(self.request, msg) + + def _task_get_images(): + # Gather our images to correlate againts IDs + try: + tmp_images = api.glance.image_list_detailed(self.request)[0] + images.extend(tmp_images) + except Exception: + msg = _("Unable to retrieve image list.") + exceptions.handle(self.request, msg) + + def _task_get_flavors(): + # Gather our flavors to correlate against IDs + try: + tmp_flavors = api.nova.flavor_list(self.request) + flavors.extend(tmp_flavors) + full_flavors.update([(str(flavor.id), flavor) + for flavor in flavors]) + except Exception: + msg = _("Unable to retrieve flavor list.") + exceptions.handle(self.request, msg) + + def _task_get_instances(): + try: + tmp_instances, self._more = api.nova.server_list( + self.request, + search_opts=search_opts) + instances.extend(tmp_instances) + except Exception: + self._more = False + exceptions.handle(self.request, + _('Unable to retrieve instance list.')) + # In case of exception when calling nova.server_list + # don't call api.network + return + + try: + api.network.servers_update_addresses(self.request, instances, + all_tenants=True) + except Exception: + exceptions.handle( + self.request, + message=_('Unable to retrieve IP addresses from Neutron.'), + ignore=True) + + with futurist.ThreadPoolExecutor(max_workers=3) as e: + e.submit(fn=_task_get_tenants) + e.submit(fn=_task_get_images) + e.submit(fn=_task_get_flavors) + + if 'project' in search_opts and \ + not swap_filter(tenants, search_opts, 'project', 'tenant_id'): + self._more = False + return instances + elif 'image_name' in search_opts and \ + not swap_filter(images, search_opts, 'image_name', 'image'): + self._more = False + return instances + elif "flavor_name" in search_opts and \ + not swap_filter(flavors, search_opts, 'flavor_name', 'flavor'): + self._more = False + return instances + + _task_get_instances() + + # Loop through instances to get flavor and tenant info. + for inst in instances: + flavor_id = inst.flavor["id"] + try: + if flavor_id in full_flavors: + inst.full_flavor = full_flavors[flavor_id] + else: + # If the flavor_id is not in full_flavors list, + # gets it via nova api. 
+ inst.full_flavor = api.nova.flavor_get( + self.request, flavor_id) + except Exception: + msg = _('Unable to retrieve instance size information.') + exceptions.handle(self.request, msg) + tenant = tenant_dict.get(inst.tenant_id, None) + inst.tenant_name = getattr(tenant, "name", None) + return instances + + +class LiveMigrateView(forms.ModalFormView): + form_class = project_forms.LiveMigrateForm + template_name = 'admin/instances/live_migrate.html' + context_object_name = 'instance' + success_url = reverse_lazy("horizon:admin:instances:index") + page_title = _("Live Migrate") + success_label = page_title + + def get_context_data(self, **kwargs): + context = super(LiveMigrateView, self).get_context_data(**kwargs) + context["instance_id"] = self.kwargs['instance_id'] + return context + + @memoized.memoized_method + def get_hosts(self, *args, **kwargs): + try: + return api.nova.host_list(self.request) + except Exception: + redirect = reverse("horizon:admin:instances:index") + msg = _('Unable to retrieve host information.') + exceptions.handle(self.request, msg, redirect=redirect) + + @memoized.memoized_method + def get_object(self, *args, **kwargs): + instance_id = self.kwargs['instance_id'] + try: + return api.nova.server_get(self.request, instance_id) + except Exception: + redirect = reverse("horizon:admin:instances:index") + msg = _('Unable to retrieve instance details.') + exceptions.handle(self.request, msg, redirect=redirect) + + def get_initial(self): + initial = super(LiveMigrateView, self).get_initial() + _object = self.get_object() + if _object: + current_host = getattr(_object, 'OS-EXT-SRV-ATTR:host', '') + initial.update({'instance_id': self.kwargs['instance_id'], + 'current_host': current_host, + 'hosts': self.get_hosts()}) + return initial + + +class DetailView(views.DetailView): + tab_group_class = tabs.AdminInstanceDetailTabs + redirect_url = 'horizon:admin:instances:index' + image_url = 'horizon:admin:images:detail' + volume_url = 'horizon:admin:volumes:detail' + + def _get_actions(self, instance): + table = project_tables.AdminInstancesTable(self.request) + return table.render_row_actions(instance) + +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from heat.engine import signal_responder +from heat.engine import clients +from heat.engine import resource +from heat.engine import scheduler +from heat.engine.resources import nova_utils +from heat.engine.resources import volume + +from heat.common import exception +from heat.engine.resources.network_interface import NetworkInterface + +from heat.openstack.common.gettextutils import _ +from heat.openstack.common import log as logging + +logger = logging.getLogger(__name__) + + +class Restarter(signal_responder.SignalResponder): + properties_schema = { + 'InstanceId': { + 'Type': 'String', + 'Required': True, + 'Description': _('Instance ID to be restarted.')}} + attributes_schema = { + "AlarmUrl": _("A signed url to handle the alarm " + "(Heat extension).") + } + + def _find_resource(self, resource_id): + ''' + Return the resource with the specified instance ID, or None if it + cannot be found. + ''' + for resource in self.stack: + if resource.resource_id == resource_id: + return resource + return None + + def handle_signal(self, details=None): + if details is None: + alarm_state = 'alarm' + else: + alarm_state = details.get('state', 'alarm').lower() + + logger.info('%s Alarm, new state %s' % (self.name, alarm_state)) + + if alarm_state != 'alarm': + return + + victim = self._find_resource(self.properties['InstanceId']) + if victim is None: + logger.info('%s Alarm, can not find instance %s' % + (self.name, self.properties['InstanceId'])) + return + + logger.info('%s Alarm, restarting resource: %s' % + (self.name, victim.name)) + self.stack.restart_resource(victim.name) + + def _resolve_attribute(self, name): + ''' + heat extension: "AlarmUrl" returns the url to post to the policy + when there is an alarm. + ''' + if name == 'AlarmUrl' and self.resource_id is not None: + return unicode(self._get_signed_url()) + + +class Instance(resource.Resource): + # AWS does not require InstanceType but Heat does because the nova + # create api call requires a flavor + tags_schema = {'Key': {'Type': 'String', + 'Required': True}, + 'Value': {'Type': 'String', + 'Required': True}} + + properties_schema = { + 'ImageId': { + 'Type': 'String', + 'Required': True, + 'Description': _('Glance image ID or name.')}, + 'InstanceType': { + 'Type': 'String', + 'Required': True, + 'Description': _('Nova instance type (flavor).')}, + 'KeyName': { + 'Type': 'String', + 'Description': _('Optional Nova keypair name.')}, + 'AvailabilityZone': { + 'Type': 'String', + 'Description': _('Availability zone to launch the instance in.')}, + 'DisableApiTermination': { + 'Type': 'String', + 'Implemented': False, + 'Description': _('Not Implemented.')}, + 'KernelId': { + 'Type': 'String', + 'Implemented': False, + 'Description': _('Not Implemented.')}, + 'Monitoring': { + 'Type': 'Boolean', + 'Implemented': False, + 'Description': _('Not Implemented.')}, + 'PlacementGroupName': { + 'Type': 'String', + 'Implemented': False, + 'Description': _('Not Implemented.')}, + 'PrivateIpAddress': { + 'Type': 'String', + 'Implemented': False, + 'Description': _('Not Implemented.')}, + 'RamDiskId': { + 'Type': 'String', + 'Implemented': False, + 'Description': _('Not Implemented.')}, + 'SecurityGroups': { + 'Type': 'List', + 'Description': _('Security group names to assign.')}, + 'SecurityGroupIds': { + 'Type': 'List', + 'Description': _('Security group IDs to assign.')}, + 'NetworkInterfaces': { + 'Type': 'List', + 'Description': _('Network interfaces to associate with ' + 'instance.')}, + 'SourceDestCheck': { + 'Type': 'Boolean', + 
'Implemented': False, + 'Description': _('Not Implemented.')}, + 'SubnetId': { + 'Type': 'String', + 'Description': _('Subnet ID to launch instance in.')}, + 'Tags': { + 'Type': 'List', + 'Schema': {'Type': 'Map', 'Schema': tags_schema}, + 'Description': _('Tags to attach to instance.')}, + 'NovaSchedulerHints': { + 'Type': 'List', + 'Schema': {'Type': 'Map', 'Schema': tags_schema}, + 'Description': _('Scheduler hints to pass ' + 'to Nova (Heat extension).')}, + 'Tenancy': { + 'Type': 'String', + 'AllowedValues': ['dedicated', 'default'], + 'Implemented': False, + 'Description': _('Not Implemented.')}, + 'UserData': { + 'Type': 'String', + 'Description': _('User data to pass to instance.')}, + 'Volumes': { + 'Type': 'List', + 'Description': _('Volumes to attach to instance.')}} + + attributes_schema = {'AvailabilityZone': _('The Availability Zone where ' + 'the specified instance is ' + 'launched.'), + 'PrivateDnsName': _('Private DNS name of the' + ' specified instance.'), + 'PublicDnsName': _('Public DNS name of the specified ' + 'instance.'), + 'PrivateIp': _('Private IP address of the specified ' + 'instance.'), + 'PublicIp': _('Public IP address of the specified ' + 'instance.')} + + update_allowed_keys = ('Metadata', 'Properties') + update_allowed_properties = ('InstanceType',) + + def __init__(self, name, json_snippet, stack): + super(Instance, self).__init__(name, json_snippet, stack) + self.ipaddress = None + self.mime_string = None + + def _set_ipaddress(self, networks): + ''' + Read the server's IP address from a list of networks provided by Nova + ''' + # Just record the first ipaddress + for n in networks: + if len(networks[n]) > 0: + self.ipaddress = networks[n][0] + break + + def _ipaddress(self): + ''' + Return the server's IP address, fetching it from Nova if necessary + ''' + if self.ipaddress is None: + self.ipaddress = nova_utils.server_to_ipaddress( + self.nova(), self.resource_id) + + return self.ipaddress or '0.0.0.0' + + def _resolve_attribute(self, name): + res = None + if name == 'AvailabilityZone': + res = self.properties['AvailabilityZone'] + elif name in ['PublicIp', 'PrivateIp', 'PublicDnsName', + 'PrivateDnsName']: + res = self._ipaddress() + + logger.info('%s._resolve_attribute(%s) == %s' % (self.name, name, res)) + return unicode(res) if res else None + + def _build_nics(self, network_interfaces, + security_groups=None, subnet_id=None): + + nics = None + + if network_interfaces: + unsorted_nics = [] + for entry in network_interfaces: + nic = (entry + if not isinstance(entry, basestring) + else {'NetworkInterfaceId': entry, + 'DeviceIndex': len(unsorted_nics)}) + unsorted_nics.append(nic) + sorted_nics = sorted(unsorted_nics, + key=lambda nic: int(nic['DeviceIndex'])) + nics = [{'port-id': nic['NetworkInterfaceId']} + for nic in sorted_nics] + else: + # if SubnetId property in Instance, ensure subnet exists + if subnet_id: + neutronclient = self.neutron() + network_id = NetworkInterface.network_id_from_subnet_id( + neutronclient, subnet_id) + # if subnet verified, create a port to use this subnet + # if port is not created explicitly, nova will choose + # the first subnet in the given network. 
+ if network_id: + fixed_ip = {'subnet_id': subnet_id} + props = { + 'admin_state_up': True, + 'network_id': network_id, + 'fixed_ips': [fixed_ip] + } + + if security_groups: + props['security_groups'] = \ + self._get_security_groups_id(security_groups) + + port = neutronclient.create_port({'port': props})['port'] + nics = [{'port-id': port['id']}] + + return nics + + def _get_security_groups_id(self, security_groups): + """Extract security_groups ids from security group list + + This function will be deprecated if Neutron client resolves security + group name to id internally. + + Args: + security_groups : A list contains security_groups ids or names + Returns: + A list of security_groups ids. + """ + ids = [] + response = self.neutron().list_security_groups(self.resource_id) + for item in response: + if item['security_groups'] is not None: + for security_group in security_groups: + for groups in item['security_groups']: + if groups['name'] == security_group \ + and groups['id'] not in ids: + ids.append(groups['id']) + elif groups['id'] == security_group \ + and groups['id'] not in ids: + ids.append(groups['id']) + return ids + + def _get_security_groups(self): + security_groups = [] + for property in ('SecurityGroups', 'SecurityGroupIds'): + if self.properties.get(property) is not None: + for sg in self.properties.get(property): + security_groups.append(sg) + if not security_groups: + security_groups = None + return security_groups + + def get_mime_string(self, userdata): + if not self.mime_string: + self.mime_string = nova_utils.build_userdata(self, userdata) + return self.mime_string + + def handle_create(self): + security_groups = self._get_security_groups() + + userdata = self.properties['UserData'] or '' + flavor = self.properties['InstanceType'] + availability_zone = self.properties['AvailabilityZone'] + + key_name = self.properties['KeyName'] + if key_name: + # confirm keypair exists + nova_utils.get_keypair(self.nova(), key_name) + + image_name = self.properties['ImageId'] + + image_id = nova_utils.get_image_id(self.nova(), image_name) + + flavor_id = nova_utils.get_flavor_id(self.nova(), flavor) + + tags = {} + if self.properties['Tags']: + for tm in self.properties['Tags']: + tags[tm['Key']] = tm['Value'] + else: + tags = None + + scheduler_hints = {} + if self.properties['NovaSchedulerHints']: + for tm in self.properties['NovaSchedulerHints']: + scheduler_hints[tm['Key']] = tm['Value'] + else: + scheduler_hints = None + + nics = self._build_nics(self.properties['NetworkInterfaces'], + security_groups=security_groups, + subnet_id=self.properties['SubnetId']) + server = None + try: + server = self.nova().servers.create( + name=self.physical_resource_name(), + image=image_id, + flavor=flavor_id, + key_name=key_name, + security_groups=security_groups, + userdata=self.get_mime_string(userdata), + meta=tags, + scheduler_hints=scheduler_hints, + nics=nics, + availability_zone=availability_zone) + finally: + # Avoid a race condition where the thread could be cancelled + # before the ID is stored + if server is not None: + self.resource_id_set(server.id) + + return server, scheduler.TaskRunner(self._attach_volumes_task()) + + def _attach_volumes_task(self): + attach_tasks = (volume.VolumeAttachTask(self.stack, + self.resource_id, + volume_id, + device) + for volume_id, device in self.volumes()) + return scheduler.PollingTaskGroup(attach_tasks) + + def check_create_complete(self, cookie): + return self._check_active(cookie) + + def _check_active(self, cookie): + server, volume_attach = 
cookie + + if not volume_attach.started(): + if server.status != 'ACTIVE': + server.get() + + # Some clouds append extra (STATUS) strings to the status + short_server_status = server.status.split('(')[0] + if short_server_status in nova_utils.deferred_server_statuses: + return False + elif server.status == 'ACTIVE': + self._set_ipaddress(server.networks) + volume_attach.start() + return volume_attach.done() + elif server.status == 'ERROR': + fault = getattr(server, 'fault', {}) + message = fault.get('message', 'Unknown') + code = fault.get('code', 500) + exc = exception.Error(_("Creation of server %(server)s " + "failed: %(message)s (%(code)s)") % + dict(server=server.name, + message=message, + code=code)) + raise exc + else: + exc = exception.Error(_("Creation of server %(server)s failed " + "with unknown status: %(status)s") % + dict(server=server.name, + status=server.status)) + raise exc + else: + return volume_attach.step() + + def volumes(self): + """ + Return an iterator over (volume_id, device) tuples for all volumes + that should be attached to this instance. + """ + volumes = self.properties['Volumes'] + if volumes is None: + return [] + + return ((vol['VolumeId'], vol['Device']) for vol in volumes) + + def handle_update(self, json_snippet, tmpl_diff, prop_diff): + if 'Metadata' in tmpl_diff: + self.metadata = tmpl_diff['Metadata'] + if 'InstanceType' in prop_diff: + flavor = prop_diff['InstanceType'] + flavor_id = nova_utils.get_flavor_id(self.nova(), flavor) + server = self.nova().servers.get(self.resource_id) + server.resize(flavor_id) + checker = scheduler.TaskRunner(nova_utils.check_resize, + server, flavor) + checker.start() + return checker + + def check_update_complete(self, checker): + return checker.step() if checker is not None else True + + def metadata_update(self, new_metadata=None): + ''' + Refresh the metadata if new_metadata is None + ''' + if new_metadata is None: + self.metadata = self.parsed_template('Metadata') + + def validate(self): + ''' + Validate any of the provided params + ''' + res = super(Instance, self).validate() + if res: + return res + + # check validity of key + key_name = self.properties.get('KeyName', None) + if key_name: + nova_utils.get_keypair(self.nova(), key_name) + + # check validity of security groups vs. network interfaces + security_groups = self._get_security_groups() + if security_groups and self.properties.get('NetworkInterfaces'): + raise exception.ResourcePropertyConflict( + 'SecurityGroups/SecurityGroupIds', + 'NetworkInterfaces') + + # make sure the image exists. + nova_utils.get_image_id(self.nova(), self.properties['ImageId']) + + @scheduler.wrappertask + def _delete_server(self, server): + ''' + Return a co-routine that deletes the server and waits for it to + disappear from Nova. 
+        '''
+        yield self._detach_volumes_task()()
+        server.delete()
+
+        while True:
+            yield
+
+            try:
+                server.get()
+            except clients.novaclient.exceptions.NotFound:
+                self.resource_id = None
+                break
+
+    def _detach_volumes_task(self):
+        '''
+        Detach volumes from the instance
+        '''
+        detach_tasks = (volume.VolumeDetachTask(self.stack,
+                                                self.resource_id,
+                                                volume_id)
+                        for volume_id, device in self.volumes())
+        return scheduler.PollingTaskGroup(detach_tasks)
+
+    def handle_delete(self):
+        '''
+        Delete an instance, blocking until it is disposed by OpenStack
+        '''
+        if self.resource_id is None:
+            return
+
+        try:
+            server = self.nova().servers.get(self.resource_id)
+        except clients.novaclient.exceptions.NotFound:
+            self.resource_id = None
+            return
+
+        server_delete_task = scheduler.TaskRunner(self._delete_server,
+                                                  server=server)
+        server_delete_task.start()
+        return server_delete_task
+
+    def check_delete_complete(self, server_delete_task):
+        # if the resource was already deleted, server_delete_task will be None
+        if server_delete_task is None:
+            return True
+        else:
+            return server_delete_task.step()
+
+    def handle_suspend(self):
+        '''
+        Suspend an instance. Note that we do not wait for the SUSPENDED
+        state; check_suspend_complete polls for it, in a similar way to the
+        create logic, so we can take advantage of coroutines.
+        '''
+        if self.resource_id is None:
+            raise exception.Error(_('Cannot suspend %s, resource_id not set') %
+                                  self.name)
+
+        try:
+            server = self.nova().servers.get(self.resource_id)
+        except clients.novaclient.exceptions.NotFound:
+            raise exception.NotFound(_('Failed to find instance %s') %
+                                     self.resource_id)
+        else:
+            logger.debug("suspending instance %s" % self.resource_id)
+            # We want the server.suspend to happen after the volume
+            # detachment has finished, so pass both tasks and the server
+            suspend_runner = scheduler.TaskRunner(server.suspend)
+            volumes_runner = scheduler.TaskRunner(self._detach_volumes_task())
+            return server, suspend_runner, volumes_runner
+
+    def check_suspend_complete(self, cookie):
+        server, suspend_runner, volumes_runner = cookie
+
+        if not volumes_runner.started():
+            volumes_runner.start()
+
+        if volumes_runner.done():
+            if not suspend_runner.started():
+                suspend_runner.start()
+
+            if suspend_runner.done():
+                if server.status == 'SUSPENDED':
+                    return True
+
+                server.get()
+                logger.debug("%s check_suspend_complete status = %s" %
+                             (self.name, server.status))
+                if server.status in list(nova_utils.deferred_server_statuses +
+                                         ['ACTIVE']):
+                    return server.status == 'SUSPENDED'
+                else:
+                    raise exception.Error(_(' nova reported unexpected '
+                                            'instance[%(instance)s] '
+                                            'status[%(status)s]') %
+                                          {'instance': self.name,
+                                           'status': server.status})
+            else:
+                suspend_runner.step()
+        else:
+            return volumes_runner.step()
+
+    def handle_resume(self):
+        '''
+        Resume an instance. Note that we do not wait for the ACTIVE state;
+        check_resume_complete polls for it, in a similar way to the create
+        logic, so we can take advantage of coroutines.
+        '''
+        if self.resource_id is None:
+            raise exception.Error(_('Cannot resume %s, resource_id not set') %
+                                  self.name)
+
+        try:
+            server = self.nova().servers.get(self.resource_id)
+        except clients.novaclient.exceptions.NotFound:
+            raise exception.NotFound(_('Failed to find instance %s') %
+                                     self.resource_id)
+        else:
+            logger.debug("resuming instance %s" % self.resource_id)
+            server.resume()
+            return server, scheduler.TaskRunner(self._attach_volumes_task())
+
+    def check_resume_complete(self, cookie):
+        return 
self._check_active(cookie)
+
+
+def resource_mapping():
+    return {
+        'AWS::EC2::Instance': Instance,
+        'OS::Heat::HARestarter': Restarter,
+    }
+
+#
+# Similar to the 1a_checkalistars, we draw the alignment stars, this time on the combi image (to make a "nice" map)
+#
+
+execfile("../config.py")
+from kirbybase import KirbyBase, KBError
+from variousfct import *
+import star
+#import shutil
+import f2n
+#from datetime import datetime, timedelta
+
+
+# Read reference image info from database
+db = KirbyBase()
+
+refimage = db.select(imgdb, ['imgname'], [refimgname], returnType='dict')
+refimage = refimage[0]
+
+
+refsexcat = os.path.join(alidir, refimage['imgname'] + ".cat")
+refautostars = star.readsexcat(refsexcat)
+refautostars = star.sortstarlistbyflux(refautostars)
+refscalingfactor = refimage['scalingfactor']
+
+# read and identify the manual reference catalog
+refmanstars = star.readmancat(alistarscat) # So these are the "manual" star coordinates
+id = star.listidentify(refmanstars, refautostars, tolerance = identtolerance) # We find the corresponding precise sextractor coordinates
+preciserefmanstars = star.sortstarlistbyflux(id["match"])
+maxalistars = len(refmanstars)
+
+
+print "%i stars in your manual star catalog." % (len(refmanstars))
+print "%i stars among them could be found in the sextractor catalog." % (len(preciserefmanstars))
+
+# We convert the star objects into dictionaries, to plot them using f2n.py
+# (f2n.py does not use these "star" objects...)
+refmanstarsasdicts = [{"name":s.name, "x":s.x, "y":s.y} for s in refmanstars]
+preciserefmanstarsasdicts = [{"name":s.name, "x":s.x, "y":s.y} for s in preciserefmanstars]
+refautostarsasdicts = [{"name":s.name, "x":s.x, "y":s.y} for s in refautostars]
+
+#print refmanstarsasdicts
+
+combifitsfile = os.path.join(workdir, "%s.fits" % combibestkey)
+#combifitsfile = os.path.join(workdir, "ali", "%s_ali.fits" % refimgname)
+f2nimg = f2n.fromfits(combifitsfile)
+f2nimg.setzscale(z1=-5, z2=1000)
+#f2nimg.rebin(2)
+f2nimg.makepilimage(scale = "log", negative = False)
+
+
+#f2nimg.drawstarlist(refautostarsasdicts, r = 30, colour = (150, 150, 150))
+#f2nimg.drawstarlist(preciserefmanstarsasdicts, r = 7, colour = (255, 0, 0))
+
+
+#f2nimg.writeinfo(["Sextractor stars (flag-filtered) : %i" % len(refautostarsasdicts)], colour = (150, 150, 150))
+#f2nimg.writeinfo(["","Identified alignment stars with corrected sextractor coordinates : %i" % len(preciserefmanstarsasdicts)], colour = (255, 0, 0))
+
+
+# We draw the rectangles around qso and empty region :
+
+lims = [map(int,x.split(':')) for x in lensregion[1:-1].split(',')]
+#f2nimg.drawrectangle(lims[0][0], lims[0][1], lims[1][0], lims[1][1], colour=(0,255,0), label = "Lens")
+
+lims = [map(int,x.split(':')) for x in emptyregion[1:-1].split(',')]
+#f2nimg.drawrectangle(lims[0][0], lims[0][1], lims[1][0], lims[1][1], colour=(0,255,0), label = "Empty")
+
+
+f2nimg.writetitle("%s / %s" % (xephemlens.split(",")[0], combibestkey))
+
+pngpath = os.path.join(workdir, "%s.png" % combibestkey)
+f2nimg.tonet(pngpath)
+
+print "I have written the map into:"
+print pngpath
+
+# print "Do you want to clean the selected image to save some space on the disk ? "
+# proquest(True)
+#
+# combidir = os.path.join(workdir, combibestkey)
+# os.remove(combidir)
+
+
+# See readme.md for instructions on running this code.
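+
+# A minimal usage sketch (not part of the original bot; the '/docs' paths are
+# illustrative) of the module-level helpers defined below: fs_new() builds an
+# empty virtual file system dict, and fs_command() returns the updated fs plus
+# a reply string. Kept as comments so nothing runs before the definitions load:
+#
+#     fs = fs_new()
+#     fs, msg = fs_command(fs, 'mkdir /docs')          # 'directory created'
+#     fs, msg = fs_command(fs, 'write /docs/a hello')  # 'file written'
+#     fs, msg = fs_command(fs, 'read /docs/a')         # msg == 'hello'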
+
+import re
+import os
+
+class VirtualFsHandler(object):
+    def usage(self):
+        return get_help()
+
+    def triage_message(self, message):
+        # return True iff we want to (possibly) respond to this message
+        if message['type'] != 'stream':
+            return False
+
+        original_content = message['content']
+        return original_content.startswith('fs ')
+
+    def handle_message(self, message, client, state_handler):
+        assert self.triage_message(message)
+
+        original_content = message['content']
+        command = original_content[len('fs '):]
+        stream = message['display_recipient']
+        topic = message['subject']
+
+        state = state_handler.get_state()
+        if state is None:
+            state = {}
+
+        if stream not in state:
+            state[stream] = fs_new()
+
+        fs = state[stream]
+        fs, msg = fs_command(fs, command)
+        state[stream] = fs
+        state_handler.set_state(state)
+
+        client.send_message(dict(
+            type='stream',
+            to=stream,
+            subject=topic,
+            content=msg,
+        ))
+
+
+def get_help():
+    return '''
+The "fs" commands implement a virtual file system for a stream.
+The locations of text are persisted for the lifetime of the bot
+running, and if you rename a stream, you will lose the info.
+
+Example commands:
+
+```
+fs mkdir: create a directory
+fs ls: list a directory
+fs write: write text
+fs read: read text
+fs rm: remove a file
+```
+
+Use commands like `fs help write` for more details on specific
+commands.
+'''
+
+def test():
+    fs = fs_new()
+    assert is_directory(fs, '/')
+
+    for cmd, expected_response in sample_conversation():
+        fs, msg = fs_command(fs, cmd)
+        if msg != expected_response:
+            raise AssertionError('''
+                cmd: %s
+                expected: %s
+                but got : %s
+                ''' % (cmd, expected_response, msg))
+
+def sample_conversation():
+    return [
+        ('write /foo contents of /foo', 'file written'),
+        ('read /foo', 'contents of /foo'),
+        ('write /bar Contents: bar bar', 'file written'),
+        ('read /bar', 'Contents: bar bar'),
+        ('write /bar invalid', 'ERROR: file already exists'),
+        ('rm /bar', 'removed'),
+        ('rm /bar', 'ERROR: file does not exist'),
+        ('write /bar new bar', 'file written'),
+        ('read /bar', 'new bar'),
+        ('write /yo/invalid whatever', 'ERROR: /yo is not a directory'),
+        ('mkdir /yo', 'directory created'),
+        ('ls /yo', 'WARNING: directory is empty'),
+        ('read /yo/nada', 'ERROR: file does not exist'),
+        ('write /yo whatever', 'ERROR: file already exists'),
+        ('write /yo/apple red', 'file written'),
+        ('read /yo/apple', 'red'),
+        ('mkdir /yo/apple', 'ERROR: file already exists'),
+        ('ls /invalid', 'ERROR: file does not exist'),
+        ('ls /foo', 'ERROR: /foo is not a directory'),
+        ('ls /', '* /bar\n* /foo\n* /yo'),
+        ('invalid command', 'ERROR: unrecognized command'),
+        ('write', 'ERROR: syntax: write <path> <some_text>'),
+        ('help', get_help()),
+        ('help ls', 'syntax: ls <path>'),
+        ('help invalid_command', get_help()),
+    ]
+
+REGEXES = dict(
+    command='(ls|mkdir|read|rm|write)',
+    path='(\S+)',
+    some_text='(.+)',
+)
+
+def get_commands():
+    return {
+        'help': (fs_help, ['command']),
+        'ls': (fs_ls, ['path']),
+        'mkdir': (fs_mkdir, ['path']),
+        'read': (fs_read, ['path']),
+        'rm': (fs_rm, ['path']),
+        'write': (fs_write, ['path', 'some_text']),
+    }
+
+def fs_command(fs, cmd):
+    if cmd.strip() == 'help':
+        return fs, get_help()
+
+    cmd_name = cmd.split()[0]
+    commands = get_commands()
+    if cmd_name not in commands:
+        return fs, 'ERROR: unrecognized command'
+
+    f, arg_names = commands[cmd_name]
+    partial_regexes = [cmd_name] + [REGEXES[a] for a in arg_names]
+    regex = ' '.join(partial_regexes)
+    m = re.match(regex, cmd)
+    if m:
+        return f(fs, *m.groups())
+    elif 
cmd_name == 'help': + return fs, get_help() + else: + return fs, 'ERROR: ' + syntax_help(cmd_name) + +def syntax_help(cmd_name): + commands = get_commands() + f, arg_names = commands[cmd_name] + arg_syntax = ' '.join('<' + a + '>' for a in arg_names) + return 'syntax: %s %s' % (cmd_name, arg_syntax) + +def fs_new(): + fs = { + '/': directory([]) + } + return fs + +def fs_help(fs, cmd_name): + return fs, syntax_help(cmd_name) + +def fs_mkdir(fs, fn): + if fn in fs: + return fs, 'ERROR: file already exists' + dir_path = os.path.dirname(fn) + if not is_directory(fs, dir_path): + msg = 'ERROR: %s is not a directory' % (dir_path,) + return fs, msg + new_fs = fs.copy() + new_dir = directory({fn}.union(fs[dir_path]['fns'])) + new_fs[dir_path] = new_dir + new_fs[fn] = directory([]) + msg = 'directory created' + return new_fs, msg + +def fs_ls(fs, fn): + if fn not in fs: + msg = 'ERROR: file does not exist' + return fs, msg + if not is_directory(fs, fn): + return fs, 'ERROR: %s is not a directory' % (fn,) + fns = fs[fn]['fns'] + if not fns: + return fs, 'WARNING: directory is empty' + msg = '\n'.join('* ' + fn for fn in sorted(fns)) + return fs, msg + +def fs_rm(fs, fn): + if fn not in fs: + msg = 'ERROR: file does not exist' + return fs, msg + new_fs = fs.copy() + new_fs.pop(fn) + msg = 'removed' + return new_fs, msg + +def fs_write(fs, fn, content): + if fn in fs: + msg = 'ERROR: file already exists' + return fs, msg + dir_path = os.path.dirname(fn) + if not is_directory(fs, dir_path): + msg = 'ERROR: %s is not a directory' % (dir_path,) + return fs, msg + new_fs = fs.copy() + new_dir = directory({fn}.union(fs[dir_path]['fns'])) + new_fs[dir_path] = new_dir + new_fs[fn] = text_file(content) + msg = 'file written' + return new_fs, msg + +def fs_read(fs, fn): + if fn not in fs: + msg = 'ERROR: file does not exist' + return fs, msg + val = fs[fn]['content'] + return fs, val + +def directory(fns): + return dict(kind='dir', fns=set(fns)) + +def text_file(content): + return dict(kind='text', content=content) + +def is_directory(fs, fn): + if fn not in fs: + return False + return fs[fn]['kind'] == 'dir' + +handler_class = VirtualFsHandler + +if __name__ == '__main__': + # We eventually want to test bots with a "real" testing + # framework. + test() + +# -*- coding: utf-8; -*- +# +# This file is part of Superdesk. +# +# Copyright 2019 Sourcefabric z.u. and contributors. +# +# For the full copyright and license information, please see the +# AUTHORS and LICENSE files distributed with this source code, or +# at https://www.sourcefabric.org/superdesk/license + +from ..service import ProdApiService