Dataset columns:

  repository_name        string   lengths 7 - 107
  function_path          string   lengths 4 - 190
  function_identifier    string   lengths 1 - 236
  language               string   1 class
  function               string   lengths 9 - 647k
  docstring              string   lengths 5 - 488k
  function_url           string   lengths 71 - 285
  context                string   lengths 0 - 2.51M
  license                string   5 classes
jeeftor/alfredtoday
src/lib/rsa/transform.py
bytes2int
python
def bytes2int(raw_bytes): return int(binascii.hexlify(raw_bytes), 16)
r"""Converts a list of bytes or an 8-bit string to an integer. When using unicode strings, encode it to some encoding like UTF8 first. >>> (((128 * 256) + 64) * 256) + 15 8405007 >>> bytes2int(b'\x80@\x0f') 8405007
https://github.com/jeeftor/alfredtoday/blob/f6e2c2228caa71015e654e1fdbf552e2ca4f90ad/src/lib/rsa/transform.py#L40-L52
from __future__ import absolute_import try: import psyco psyco.full() except ImportError: pass import binascii from struct import pack from rsa import common from rsa._compat import is_integer, b, byte, get_word_alignment, ZERO_BYTE, EMPTY_BYTE
MIT License
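The conversion above is self-contained enough to try in isolation. A minimal sketch, assuming only the standard library (the surrounding rsa package is not needed), that reproduces the docstring's doctest values:

```python
import binascii

def bytes2int(raw_bytes):
    # Interpret the byte string as a big-endian unsigned integer.
    return int(binascii.hexlify(raw_bytes), 16)

# b'\x80@\x0f' is the bytes 0x80, 0x40, 0x0f read big-endian:
assert bytes2int(b'\x80@\x0f') == (((0x80 * 256) + 0x40) * 256) + 0x0f == 8405007
# On Python 3 the standard library gives the same result directly:
assert int.from_bytes(b'\x80@\x0f', 'big') == 8405007
```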
nussl/nussl
nussl/separation/spatial/duet.py
Duet._compute_masks
python
def _compute_masks(self): best_so_far = np.inf * np.ones_like(self.stft_ch0, dtype=float) for i in range(0, self.num_sources): mask_array = np.zeros_like(self.stft_ch0, dtype=bool) phase = np.exp(-1j * self.frequency_matrix * self.delay_peak[i]) score = np.abs(self.atn_peak[i] * phase * self.stft_ch0 - self.stft_ch1) ** 2 / (1 + self.atn_peak[i] ** 2) mask = (score < best_so_far) mask_array[mask] = True background_mask = self.mask_type(np.array(mask_array)) self.result_masks.append(background_mask) self.result_masks[0].mask = np.logical_xor(self.result_masks[i].mask, self.result_masks[0].mask) best_so_far[mask] = score[mask] self.result_masks[0].mask = np.logical_not(self.result_masks[0].mask) return self.result_masks
Receives the attenuation and delay peaks and computes a mask to be applied to the signal for source separation.
https://github.com/nussl/nussl/blob/471e7965c5788bff9fe2e1f7884537cae2d18e6f/nussl/separation/spatial/duet.py#L314-L335
import numpy as np from scipy import signal from .. import MaskSeparationBase from ...core import utils from ...core import constants class Duet(MaskSeparationBase): def __init__(self, input_audio_signal, num_sources, attenuation_min=-3, attenuation_max=3, num_attenuation_bins=50, delay_min=-3, delay_max=3, num_delay_bins=50, peak_threshold=0.0, attenuation_min_distance=5, delay_min_distance=5, p=1, q=0, mask_type='binary'): super().__init__( input_audio_signal=input_audio_signal, mask_type=mask_type) self.num_sources = num_sources self.attenuation_min = attenuation_min self.attenuation_max = attenuation_max self.num_attenuation_bins = num_attenuation_bins self.delay_min = delay_min self.delay_max = delay_max self.num_delay_bins = num_delay_bins self.peak_threshold = peak_threshold self.attenuation_min_distance = attenuation_min_distance self.delay_min_distance = delay_min_distance self.p = p self.q = q self.stft_ch0 = None self.stft_ch1 = None self.frequency_matrix = None self.symmetric_atn = None self.delay = None self.num_time_bins = None self.num_frequency_bins = None self.attenuation_bins = None self.delay_bins = None self.normalized_attenuation_delay_histogram = None self.attenuation_delay_histogram = None self.peak_indices = None self.delay_peak = None self.atn_peak = None self.separated_sources = None def run(self): self.result_masks = [] self.stft_ch0, self.stft_ch1, self.frequency_matrix = self._compute_spectrogram( self.sample_rate) self.symmetric_atn, self.delay = self._compute_atn_delay( self.stft_ch0, self.stft_ch1, self.frequency_matrix) self.normalized_attenuation_delay_histogram, self.attenuation_bins, self.delay_bins = ( self._make_histogram() ) self.peak_indices = utils.find_peak_indices( self.normalized_attenuation_delay_histogram, self.num_sources, threshold=self.peak_threshold, min_dist=[self.attenuation_min_distance, self.delay_min_distance]) self.delay_peak, atn_delay_est, self.atn_peak = self._convert_peaks( self.peak_indices) computed_masks = self._compute_masks() return computed_masks def _compute_spectrogram(self, sample_rate): self.audio_signal.stft_params = self.stft_params self.audio_signal.stft() stft_ch0 = self.audio_signal.get_stft_channel(0) stft_ch1 = self.audio_signal.get_stft_channel(1) n_time_bins = len(self.audio_signal.time_bins_vector) wmat = np.array(np.tile(np.mat( self.audio_signal.freq_vector).T, (1, n_time_bins))) * ( 2 * np.pi / sample_rate) wmat += constants.EPSILON return stft_ch0, stft_ch1, wmat @staticmethod def _compute_atn_delay(stft_ch0, stft_ch1, frequency_matrix): inter_channel_ratio = (stft_ch1 + constants.EPSILON) / (stft_ch0 + constants.EPSILON) attenuation = np.abs(inter_channel_ratio) symmetric_attenuation = attenuation - 1 / attenuation relative_delay = -np.imag(np.log(inter_channel_ratio)) / (2 * np.pi * frequency_matrix) return symmetric_attenuation, relative_delay def _make_histogram(self): time_frequency_weights = (np.abs(self.stft_ch0) * np.abs(self.stft_ch1)) ** self.p * (np.abs(self.frequency_matrix)) ** self.q attenuation_premask = np.logical_and(self.attenuation_min < self.symmetric_atn, self.symmetric_atn < self.attenuation_max) delay_premask = np.logical_and(self.delay_min < self.delay, self.delay < self.delay_max) attenuation_delay_premask = np.logical_and(attenuation_premask, delay_premask) nonzero_premask = np.nonzero(attenuation_delay_premask) symmetric_attenuation_vector = self.symmetric_atn[nonzero_premask] delay_vector = self.delay[nonzero_premask] time_frequency_weights_vector = 
time_frequency_weights[nonzero_premask] bins_array = np.array([self.num_attenuation_bins, self.num_delay_bins]) range_array = np.array([[self.attenuation_min, self.attenuation_max], [self.delay_min, self.delay_max]]) histogram, atn_bins, delay_bins = np.histogram2d(symmetric_attenuation_vector, delay_vector, bins=bins_array, range=range_array, weights=time_frequency_weights_vector) self.attenuation_delay_histogram = histogram histogram /= histogram.max() histogram = self._smooth_matrix(histogram, np.array([3])) return histogram, atn_bins, delay_bins def _convert_peaks(self, peak_indices): atn_indices = [x[0] for x in peak_indices] delay_indices = [x[1] for x in peak_indices] symmetric_atn_peak = self.attenuation_bins[atn_indices] delay_peak = self.delay_bins[delay_indices] atn_delay_est = np.column_stack((symmetric_atn_peak, delay_peak)) atn_peak = (symmetric_atn_peak + np.sqrt(symmetric_atn_peak ** 2 + 4)) / 2 return delay_peak, atn_delay_est, atn_peak
MIT License
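A toy sketch of the winner-take-all scoring that `_compute_masks` performs, with made-up two-channel STFT values and the delay/phase term dropped for brevity; it illustrates the `score < best_so_far` loop only, not the full DUET pipeline:

```python
import numpy as np

stft_ch0 = np.array([[1.0, 2.0], [3.0, 4.0]])   # hypothetical channel-0 STFT magnitudes
stft_ch1 = np.array([[1.1, 0.5], [2.9, 8.0]])   # hypothetical channel-1 STFT magnitudes
atn_peaks = [1.0, 2.0]                          # hypothetical attenuation peaks, one per source

best_so_far = np.full_like(stft_ch0, np.inf)
masks = []
for a in atn_peaks:
    # Error of explaining ch1 as an attenuated copy of ch0 (phase/delay omitted).
    score = np.abs(a * stft_ch0 - stft_ch1) ** 2 / (1 + a ** 2)
    mask = score < best_so_far          # bins this source explains best so far
    masks.append(mask)
    best_so_far[mask] = score[mask]

print(masks[0])   # time-frequency bins claimed by the first source on its pass
```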
bitmovin/bitmovin-api-sdk-python
bitmovin_api_sdk/models/s3_role_based_input.py
S3RoleBasedInput.role_arn
python
def role_arn(self, role_arn): if role_arn is not None: if not isinstance(role_arn, string_types): raise TypeError("Invalid type for `role_arn`, type has to be `string_types`") self._role_arn = role_arn
Sets the role_arn of this S3RoleBasedInput. Amazon ARN of the IAM Role (Identity and Access Management Role) that will be assumed for S3 access. This role has to be created by the owner of the account with the S3 bucket (i.e., you as a customer). For Bitmovin to be able to assume this role, the following has to be added to the trust policy of the role: ``` { \"Effect\": \"Allow\", \"Principal\": { \"AWS\": \"arn:aws:iam::630681592166:user/bitmovinCustomerS3Access\" }, \"Action\": \"sts:AssumeRole\", \"Condition\": { \"StringEquals\": { \"sts:ExternalId\": \"{{externalId}}\" } } } ``` where \"arn:aws:iam::630681592166:user/bitmovinCustomerS3Access\" is the Bitmovin user used for the access. The `Condition` is optional, but we highly recommend it; see the property `externalId` below for more information. This setup allows Bitmovin to assume the provided IAM role and read data from your S3 bucket. Please note that the IAM role has to have read access on S3. For more information about role creation, please visit https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-service.html#roles-creatingrole-service-console (required) :param role_arn: The role_arn of this S3RoleBasedInput. :type: string_types
https://github.com/bitmovin/bitmovin-api-sdk-python/blob/79dd938804197151af7cbe5501c7ec1d97872c15/bitmovin_api_sdk/models/s3_role_based_input.py#L123-L137
from enum import Enum from six import string_types, iteritems from bitmovin_api_sdk.common.poscheck import poscheck_model from bitmovin_api_sdk.models.aws_cloud_region import AwsCloudRegion from bitmovin_api_sdk.models.external_id_mode import ExternalIdMode from bitmovin_api_sdk.models.input import Input import pprint import six class S3RoleBasedInput(Input): @poscheck_model def __init__(self, id_=None, name=None, description=None, created_at=None, modified_at=None, custom_data=None, bucket_name=None, role_arn=None, external_id=None, external_id_mode=None, cloud_region=None): super(S3RoleBasedInput, self).__init__(id_=id_, name=name, description=description, created_at=created_at, modified_at=modified_at, custom_data=custom_data) self._bucket_name = None self._role_arn = None self._external_id = None self._external_id_mode = None self._cloud_region = None self.discriminator = None if bucket_name is not None: self.bucket_name = bucket_name if role_arn is not None: self.role_arn = role_arn if external_id is not None: self.external_id = external_id if external_id_mode is not None: self.external_id_mode = external_id_mode if cloud_region is not None: self.cloud_region = cloud_region @property def openapi_types(self): types = {} if hasattr(super(S3RoleBasedInput, self), 'openapi_types'): types = getattr(super(S3RoleBasedInput, self), 'openapi_types') types.update({ 'bucket_name': 'string_types', 'role_arn': 'string_types', 'external_id': 'string_types', 'external_id_mode': 'ExternalIdMode', 'cloud_region': 'AwsCloudRegion' }) return types @property def attribute_map(self): attributes = {} if hasattr(super(S3RoleBasedInput, self), 'attribute_map'): attributes = getattr(super(S3RoleBasedInput, self), 'attribute_map') attributes.update({ 'bucket_name': 'bucketName', 'role_arn': 'roleArn', 'external_id': 'externalId', 'external_id_mode': 'externalIdMode', 'cloud_region': 'cloudRegion' }) return attributes @property def bucket_name(self): return self._bucket_name @bucket_name.setter def bucket_name(self, bucket_name): if bucket_name is not None: if not isinstance(bucket_name, string_types): raise TypeError("Invalid type for `bucket_name`, type has to be `string_types`") self._bucket_name = bucket_name @property def role_arn(self): return self._role_arn @role_arn.setter
MIT License
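A hedged usage sketch of the setter above, with placeholder bucket and ARN values; it only exercises the type check the property enforces:

```python
from bitmovin_api_sdk.models.s3_role_based_input import S3RoleBasedInput

inp = S3RoleBasedInput(
    bucket_name='my-bucket',                                      # placeholder
    role_arn='arn:aws:iam::123456789012:role/bitmovinS3Access',   # placeholder
)

try:
    inp.role_arn = 42          # not a string, so the setter rejects it
except TypeError as err:
    print(err)                 # Invalid type for `role_arn`, type has to be `string_types`
```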
zeliu98/group-free-3d
utils/logger.py
setup_logger
python
def setup_logger( output=None, distributed_rank=0, *, color=True, name="log", abbrev_name=None ): logger = logging.getLogger(name) logger.setLevel(logging.DEBUG) logger.propagate = False if abbrev_name is None: abbrev_name = name plain_formatter = logging.Formatter( "[%(asctime)s] %(name)s %(levelname)s: %(message)s", datefmt="%m/%d %H:%M:%S" ) if distributed_rank == 0: ch = logging.StreamHandler(stream=sys.stdout) ch.setLevel(logging.DEBUG) if color: formatter = _ColorfulFormatter( colored("[%(asctime)s %(name)s]: ", "green") + "%(message)s", datefmt="%m/%d %H:%M:%S", root_name=name, abbrev_name=str(abbrev_name), ) else: formatter = plain_formatter ch.setFormatter(formatter) logger.addHandler(ch) if output is not None: if output.endswith(".txt") or output.endswith(".log"): filename = output else: filename = os.path.join(output, "log.txt") if distributed_rank > 0: filename = filename + f".rank{distributed_rank}" os.makedirs(os.path.dirname(filename), exist_ok=True) fh = logging.StreamHandler(_cached_log_stream(filename)) fh.setLevel(logging.DEBUG) fh.setFormatter(plain_formatter) logger.addHandler(fh) return logger
Initialize the detectron2 logger and set its verbosity level to "INFO". Args: output (str): a file name or a directory to save log. If None, will not save log file. If ends with ".txt" or ".log", assumed to be a file name. Otherwise, logs will be saved to `output/log.txt`. name (str): the root module name of this logger Returns: logging.Logger: a logger
https://github.com/zeliu98/group-free-3d/blob/ef8b7bb5c3bf5b49b957624595dc6a642b6d0036/utils/logger.py#L31-L87
import functools import logging import os import sys from termcolor import colored class _ColorfulFormatter(logging.Formatter): def __init__(self, *args, **kwargs): self._root_name = kwargs.pop("root_name") + "." self._abbrev_name = kwargs.pop("abbrev_name", "") if len(self._abbrev_name): self._abbrev_name = self._abbrev_name + "." super(_ColorfulFormatter, self).__init__(*args, **kwargs) def formatMessage(self, record): record.name = record.name.replace(self._root_name, self._abbrev_name) log = super(_ColorfulFormatter, self).formatMessage(record) if record.levelno == logging.WARNING: prefix = colored("WARNING", "red", attrs=["blink"]) elif record.levelno == logging.ERROR or record.levelno == logging.CRITICAL: prefix = colored("ERROR", "red", attrs=["blink", "underline"]) else: return log return prefix + " " + log @functools.lru_cache()
MIT License
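A minimal usage sketch of `setup_logger`, assuming it is imported from `utils/logger.py` of the repository above and that `termcolor` is installed:

```python
from utils.logger import setup_logger

logger = setup_logger(output="outputs", distributed_rank=0, name="groupfree3d")
logger.info("training started")    # colored console line plus outputs/log.txt
logger.warning("low GPU memory")   # WARNING prefix rendered in red on the console
```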
microsoft/azure-devops-python-api
azure-devops/azure/devops/v6_0/security/security_client.py
SecurityClient.query_security_namespaces
python
def query_security_namespaces(self, security_namespace_id=None, local_only=None): route_values = {} if security_namespace_id is not None: route_values['securityNamespaceId'] = self._serialize.url('security_namespace_id', security_namespace_id, 'str') query_parameters = {} if local_only is not None: query_parameters['localOnly'] = self._serialize.query('local_only', local_only, 'bool') response = self._send(http_method='GET', location_id='ce7b9f95-fde9-4be8-a86d-83b366f0b87a', version='6.0-preview.1', route_values=route_values, query_parameters=query_parameters) return self._deserialize('[SecurityNamespaceDescription]', self._unwrap_collection(response))
QuerySecurityNamespaces. [Preview API] List all security namespaces or just the specified namespace. :param str security_namespace_id: Security namespace identifier. :param bool local_only: If true, retrieve only local security namespaces. :rtype: [SecurityNamespaceDescription]
https://github.com/microsoft/azure-devops-python-api/blob/451cade4c475482792cbe9e522c1fee32393139e/azure-devops/azure/devops/v6_0/security/security_client.py#L205-L223
 from msrest import Serializer, Deserializer from ...client import Client from . import models class SecurityClient(Client): def __init__(self, base_url=None, creds=None): super(SecurityClient, self).__init__(base_url, creds) client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} self._serialize = Serializer(client_models) self._deserialize = Deserializer(client_models) resource_area_identifier = None def remove_access_control_entries(self, security_namespace_id, token=None, descriptors=None): route_values = {} if security_namespace_id is not None: route_values['securityNamespaceId'] = self._serialize.url('security_namespace_id', security_namespace_id, 'str') query_parameters = {} if token is not None: query_parameters['token'] = self._serialize.query('token', token, 'str') if descriptors is not None: query_parameters['descriptors'] = self._serialize.query('descriptors', descriptors, 'str') response = self._send(http_method='DELETE', location_id='ac08c8ff-4323-4b08-af90-bcd018d380ce', version='6.0-preview.1', route_values=route_values, query_parameters=query_parameters) return self._deserialize('bool', response) def set_access_control_entries(self, container, security_namespace_id): route_values = {} if security_namespace_id is not None: route_values['securityNamespaceId'] = self._serialize.url('security_namespace_id', security_namespace_id, 'str') content = self._serialize.body(container, 'object') response = self._send(http_method='POST', location_id='ac08c8ff-4323-4b08-af90-bcd018d380ce', version='6.0-preview.1', route_values=route_values, content=content) return self._deserialize('[AccessControlEntry]', self._unwrap_collection(response)) def query_access_control_lists(self, security_namespace_id, token=None, descriptors=None, include_extended_info=None, recurse=None): route_values = {} if security_namespace_id is not None: route_values['securityNamespaceId'] = self._serialize.url('security_namespace_id', security_namespace_id, 'str') query_parameters = {} if token is not None: query_parameters['token'] = self._serialize.query('token', token, 'str') if descriptors is not None: query_parameters['descriptors'] = self._serialize.query('descriptors', descriptors, 'str') if include_extended_info is not None: query_parameters['includeExtendedInfo'] = self._serialize.query('include_extended_info', include_extended_info, 'bool') if recurse is not None: query_parameters['recurse'] = self._serialize.query('recurse', recurse, 'bool') response = self._send(http_method='GET', location_id='18a2ad18-7571-46ae-bec7-0c7da1495885', version='6.0-preview.1', route_values=route_values, query_parameters=query_parameters) return self._deserialize('[AccessControlList]', self._unwrap_collection(response)) def remove_access_control_lists(self, security_namespace_id, tokens=None, recurse=None): route_values = {} if security_namespace_id is not None: route_values['securityNamespaceId'] = self._serialize.url('security_namespace_id', security_namespace_id, 'str') query_parameters = {} if tokens is not None: query_parameters['tokens'] = self._serialize.query('tokens', tokens, 'str') if recurse is not None: query_parameters['recurse'] = self._serialize.query('recurse', recurse, 'bool') response = self._send(http_method='DELETE', location_id='18a2ad18-7571-46ae-bec7-0c7da1495885', version='6.0-preview.1', route_values=route_values, query_parameters=query_parameters) return self._deserialize('bool', response) def set_access_control_lists(self, access_control_lists, security_namespace_id): 
route_values = {} if security_namespace_id is not None: route_values['securityNamespaceId'] = self._serialize.url('security_namespace_id', security_namespace_id, 'str') content = self._serialize.body(access_control_lists, 'VssJsonCollectionWrapper') self._send(http_method='POST', location_id='18a2ad18-7571-46ae-bec7-0c7da1495885', version='6.0-preview.1', route_values=route_values, content=content) def has_permissions_batch(self, eval_batch): content = self._serialize.body(eval_batch, 'PermissionEvaluationBatch') response = self._send(http_method='POST', location_id='cf1faa59-1b63-4448-bf04-13d981a46f5d', version='6.0-preview.1', content=content) return self._deserialize('PermissionEvaluationBatch', response) def has_permissions(self, security_namespace_id, permissions=None, tokens=None, always_allow_administrators=None, delimiter=None): route_values = {} if security_namespace_id is not None: route_values['securityNamespaceId'] = self._serialize.url('security_namespace_id', security_namespace_id, 'str') if permissions is not None: route_values['permissions'] = self._serialize.url('permissions', permissions, 'int') query_parameters = {} if tokens is not None: query_parameters['tokens'] = self._serialize.query('tokens', tokens, 'str') if always_allow_administrators is not None: query_parameters['alwaysAllowAdministrators'] = self._serialize.query('always_allow_administrators', always_allow_administrators, 'bool') if delimiter is not None: query_parameters['delimiter'] = self._serialize.query('delimiter', delimiter, 'str') response = self._send(http_method='GET', location_id='dd3b8bd6-c7fc-4cbd-929a-933d9c011c9d', version='6.0-preview.2', route_values=route_values, query_parameters=query_parameters) return self._deserialize('[bool]', self._unwrap_collection(response)) def remove_permission(self, security_namespace_id, descriptor, permissions=None, token=None): route_values = {} if security_namespace_id is not None: route_values['securityNamespaceId'] = self._serialize.url('security_namespace_id', security_namespace_id, 'str') if permissions is not None: route_values['permissions'] = self._serialize.url('permissions', permissions, 'int') query_parameters = {} if descriptor is not None: query_parameters['descriptor'] = self._serialize.query('descriptor', descriptor, 'str') if token is not None: query_parameters['token'] = self._serialize.query('token', token, 'str') response = self._send(http_method='DELETE', location_id='dd3b8bd6-c7fc-4cbd-929a-933d9c011c9d', version='6.0-preview.2', route_values=route_values, query_parameters=query_parameters) return self._deserialize('AccessControlEntry', response)
MIT License
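A sketch of calling this endpoint through the published `azure-devops` package; the organization URL and personal access token are placeholders, and the attributes read from the returned `SecurityNamespaceDescription` objects are assumed from the REST contract:

```python
from azure.devops.connection import Connection
from msrest.authentication import BasicAuthentication

connection = Connection(
    base_url="https://dev.azure.com/your-org",                    # placeholder
    creds=BasicAuthentication("", "your-personal-access-token"),  # placeholder PAT
)
security_client = connection.clients_v6_0.get_security_client()

for ns in security_client.query_security_namespaces(local_only=True):
    print(ns.namespace_id, ns.name)
```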
chilcote/unearth
artifacts/active_directory_node.py
fact
python
def fact(): result = "None" net_config = SCDynamicStoreCreate(None, "net", None, None) d = SCDynamicStoreCopyValue(net_config, "com.apple.opendirectoryd.ActiveDirectory") if d: result = d.get("NodeName", None) return {factoid: result}
Returns the Active Directory node
https://github.com/chilcote/unearth/blob/1aaa79195850aac8920efe2d632911d19d998fa3/artifacts/active_directory_node.py#L6-L16
from SystemConfiguration import SCDynamicStoreCopyValue, SCDynamicStoreCreate factoid = "active_directory_node"
Apache License 2.0
botfront/rasa-for-botfront
rasa/core/actions/forms.py
FormAction.validate_slots
python
async def validate_slots( self, slot_candidates: Dict[Text, Any], tracker: "DialogueStateTracker", domain: Domain, output_channel: OutputChannel, nlg: NaturalLanguageGenerator, ) -> List[Event]: logger.debug(f"Validating extracted slots: {slot_candidates}") events = [ SlotSet(slot_name, value) for slot_name, value in slot_candidates.items() ] validate_name = f"validate_{self.name()}" if validate_name not in domain.action_names_or_texts: return events _tracker = self._temporary_tracker(tracker, events, domain) _action = RemoteAction(validate_name, self.action_endpoint) validate_events = await _action.run(output_channel, nlg, _tracker, domain) validated_slot_names = [ event.key for event in validate_events if isinstance(event, SlotSet) ] return validate_events + [ event for event in events if event.key not in validated_slot_names ]
Validate the extracted slots. If a custom action is available for validating the slots, we call it to validate them. Otherwise there is no validation. Args: slot_candidates: Extracted slots which are candidates to fill the slots required by the form. tracker: The current conversation tracker. domain: The current model domain. output_channel: The output channel which can be used to send messages to the user. nlg: `NaturalLanguageGenerator` to use for response generation. Returns: The validation events including potential bot messages and `SlotSet` events for the validated slots.
https://github.com/botfront/rasa-for-botfront/blob/6e0e48d0059e197b5f686df1e27935769c3641b7/rasa/core/actions/forms.py#L378-L427
from typing import Text, List, Optional, Union, Any, Dict, Tuple, Set import logging import json from rasa.core.actions import action from rasa.core.actions.loops import LoopAction from rasa.core.channels import OutputChannel from rasa.shared.core.domain import Domain, InvalidDomain, SlotMapping from rasa.core.actions.action import ActionExecutionRejection, RemoteAction from rasa.shared.core.constants import ( ACTION_LISTEN_NAME, REQUESTED_SLOT, LOOP_INTERRUPTED, ) from rasa.shared.constants import UTTER_PREFIX from rasa.shared.core.events import ( Event, SlotSet, ActionExecuted, ActiveLoop, ActionExecutionRejected, ) from rasa.core.nlg import NaturalLanguageGenerator from rasa.shared.core.trackers import DialogueStateTracker from rasa.utils.endpoints import EndpointConfig logger = logging.getLogger(__name__) class FormAction(LoopAction): def __init__( self, form_name: Text, action_endpoint: Optional[EndpointConfig] ) -> None: self._form_name = form_name self.action_endpoint = action_endpoint self._unique_entity_mappings = None def name(self) -> Text: return self._form_name def required_slots(self, domain: Domain) -> List[Text]: return list(domain.slot_mapping_for_form(self.name()).keys()) def from_entity( self, entity: Text, intent: Optional[Union[Text, List[Text]]] = None, not_intent: Optional[Union[Text, List[Text]]] = None, role: Optional[Text] = None, group: Optional[Text] = None, ) -> Dict[Text, Any]: intent, not_intent = self._list_intents(intent, not_intent) return { "type": str(SlotMapping.FROM_ENTITY), "entity": entity, "intent": intent, "not_intent": not_intent, "role": role, "group": group, } def get_mappings_for_slot( self, slot_to_fill: Text, domain: Domain ) -> List[Dict[Text, Any]]: requested_slot_mappings = self._to_list( domain.slot_mapping_for_form(self.name()).get( slot_to_fill, self.from_entity(slot_to_fill) ) ) for requested_slot_mapping in requested_slot_mappings: if ( not isinstance(requested_slot_mapping, dict) or requested_slot_mapping.get("type") is None ): raise TypeError("Provided incompatible slot mapping") return requested_slot_mappings def _create_unique_entity_mappings(self, domain: Domain) -> Set[Text]: unique_entity_slot_mappings = set() duplicate_entity_slot_mappings = set() for slot_mappings in domain.slot_mapping_for_form(self.name()).values(): for slot_mapping in slot_mappings: if slot_mapping.get("type") == str(SlotMapping.FROM_ENTITY): mapping_as_string = json.dumps(slot_mapping, sort_keys=True) if mapping_as_string in unique_entity_slot_mappings: unique_entity_slot_mappings.remove(mapping_as_string) duplicate_entity_slot_mappings.add(mapping_as_string) elif mapping_as_string not in duplicate_entity_slot_mappings: unique_entity_slot_mappings.add(mapping_as_string) return unique_entity_slot_mappings def _entity_mapping_is_unique( self, slot_mapping: Dict[Text, Any], domain: Domain ) -> bool: if self._unique_entity_mappings is None: self._unique_entity_mappings = self._create_unique_entity_mappings(domain) mapping_as_string = json.dumps(slot_mapping, sort_keys=True) return mapping_as_string in self._unique_entity_mappings @staticmethod def intent_is_desired( requested_slot_mapping: Dict[Text, Any], tracker: "DialogueStateTracker" ) -> bool: mapping_intents = requested_slot_mapping.get("intent", []) mapping_not_intents = requested_slot_mapping.get("not_intent", []) intent = tracker.latest_message.intent.get("name") intent_not_blocked = not mapping_intents and intent not in mapping_not_intents return intent_not_blocked or intent in mapping_intents def 
entity_is_desired( self, slot_mapping: Dict[Text, Any], slot: Text, entity_type_of_slot_to_fill: Optional[Text], tracker: DialogueStateTracker, domain: Domain, ) -> bool: slot_equals_entity = slot == slot_mapping.get("entity") entity_mapping_is_unique = self._entity_mapping_is_unique(slot_mapping, domain) if ( slot_mapping.get("role") is None and slot_mapping.get("group") is None ) or entity_type_of_slot_to_fill != slot_mapping.get("entity"): slot_fulfils_entity_mapping = False else: matching_values = self.get_entity_value( slot_mapping.get("entity"), tracker, slot_mapping.get("role"), slot_mapping.get("group"), ) slot_fulfils_entity_mapping = matching_values is not None return ( slot_equals_entity or entity_mapping_is_unique or slot_fulfils_entity_mapping ) @staticmethod def get_entity_value( name: Text, tracker: "DialogueStateTracker", role: Optional[Text] = None, group: Optional[Text] = None, ) -> Any: value = list( tracker.get_latest_entity_values(name, entity_group=group, entity_role=role) ) if len(value) == 0: value = None elif len(value) == 1: value = value[0] return value def extract_other_slots( self, tracker: DialogueStateTracker, domain: Domain ) -> Dict[Text, Any]: slot_to_fill = self.get_slot_to_fill(tracker) entity_type_of_slot_to_fill = self._get_entity_type_of_slot_to_fill( slot_to_fill, domain ) slot_values = {} for slot in self.required_slots(domain): if slot != slot_to_fill: slot_mappings = self.get_mappings_for_slot(slot, domain) for slot_mapping in slot_mappings: should_fill_entity_slot = ( slot_mapping["type"] == str(SlotMapping.FROM_ENTITY) and self.intent_is_desired(slot_mapping, tracker) and self.entity_is_desired( slot_mapping, slot, entity_type_of_slot_to_fill, tracker, domain, ) ) should_fill_trigger_slot = ( tracker.active_loop_name != self.name() and slot_mapping["type"] == str(SlotMapping.FROM_TRIGGER_INTENT) and self.intent_is_desired(slot_mapping, tracker) ) if should_fill_entity_slot: value = self.get_entity_value( slot_mapping["entity"], tracker, slot_mapping.get("role"), slot_mapping.get("group"), ) elif should_fill_trigger_slot: value = slot_mapping.get("value") else: value = None if value is not None: logger.debug(f"Extracted '{value}' for extra slot '{slot}'.") slot_values[slot] = value break return slot_values def get_slot_to_fill(self, tracker: "DialogueStateTracker") -> Optional[str]: return ( tracker.get_slot(REQUESTED_SLOT) if tracker.active_loop_name == self.name() else None ) def extract_requested_slot( self, tracker: "DialogueStateTracker", domain: Domain ) -> Dict[Text, Any]: slot_to_fill = self.get_slot_to_fill(tracker) logger.debug(f"Trying to extract requested slot '{slot_to_fill}' ...") requested_slot_mappings = self.get_mappings_for_slot(slot_to_fill, domain) for requested_slot_mapping in requested_slot_mappings: logger.debug(f"Got mapping '{requested_slot_mapping}'") if self.intent_is_desired(requested_slot_mapping, tracker): mapping_type = requested_slot_mapping["type"] if mapping_type == str(SlotMapping.FROM_ENTITY): value = self.get_entity_value( requested_slot_mapping.get("entity"), tracker, requested_slot_mapping.get("role"), requested_slot_mapping.get("group"), ) elif mapping_type == str(SlotMapping.FROM_INTENT): value = requested_slot_mapping.get("value") elif mapping_type == str(SlotMapping.FROM_TRIGGER_INTENT): continue elif mapping_type == str(SlotMapping.FROM_TEXT): value = tracker.latest_message.text else: raise InvalidDomain("Provided slot mapping type is not supported") if value is not None: logger.debug( f"Successfully 
extracted '{value}' for requested slot " f"'{slot_to_fill}'" ) return {slot_to_fill: value} logger.debug(f"Failed to extract requested slot '{slot_to_fill}'") return {}
Apache License 2.0
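A toy sketch of the merge performed at the end of `validate_slots`: events returned by the custom validation action take precedence over the raw extracted candidates. The slot names and values are made up, and a stripped-down stand-in is used for Rasa's `SlotSet` event:

```python
class SlotSet:
    # Stand-in for rasa.shared.core.events.SlotSet, reduced to what the merge needs.
    def __init__(self, key, value):
        self.key, self.value = key, value

candidates = [SlotSet("cuisine", "chinese"), SlotSet("num_people", "ten")]   # extracted
validate_events = [SlotSet("num_people", None)]   # custom action rejected this one

validated_names = [e.key for e in validate_events if isinstance(e, SlotSet)]
merged = validate_events + [e for e in candidates if e.key not in validated_names]

print([(e.key, e.value) for e in merged])
# [('num_people', None), ('cuisine', 'chinese')]
```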
fkie/multimaster_fkie
fkie_node_manager/src/fkie_node_manager/select_dialog.py
SelectDialog.__init__
python
def __init__(self, items=list(), buttons=QDialogButtonBox.Cancel | QDialogButtonBox.Ok, exclusive=False, preselect_all=False, title='', description='', icon='', parent=None, select_if_single=True, checkitem1='', checkitem2='', closein=0, store_geometry=''): QDialog.__init__(self, parent=parent) self.setObjectName(' - '.join(['SelectDialog', utf8(items)])) self.verticalLayout = QVBoxLayout(self) self.verticalLayout.setObjectName("verticalLayout") self.verticalLayout.setContentsMargins(3, 3, 3, 3) self.filter_field = EnhancedLineEdit(self) self.filter_field.setPlaceholderText("filter") self.filter_field.textChanged.connect(self._on_filter_changed) self.verticalLayout.addWidget(self.filter_field) if description: self.description_frame = QFrame(self) descriptionLayout = QHBoxLayout(self.description_frame) if icon: self.icon_label = QLabel(self.description_frame) self.icon_label.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed) self.icon_label.setPixmap(QPixmap(icon).scaled(30, 30, Qt.KeepAspectRatio)) descriptionLayout.addWidget(self.icon_label) self.description_label = QLabel(self.description_frame) self.description_label.setWordWrap(True) self.description_label.setText(description) descriptionLayout.addWidget(self.description_label) self.verticalLayout.addWidget(self.description_frame) self.content = MainBox(self) if items: self.scroll_area = QScrollArea(self) self.scroll_area.setFocusPolicy(Qt.NoFocus) self.scroll_area.setObjectName("scroll_area") self.scroll_area.setWidgetResizable(True) self.scroll_area.setWidget(self.content) self.verticalLayout.addWidget(self.scroll_area) self.checkitem1 = checkitem1 self.checkitem1_result = False self.checkitem2 = checkitem2 self.checkitem2_result = False if not exclusive and items: self._ignore_next_toggle = False self.select_all_checkbox = QCheckBox('all entries') self.select_all_checkbox.setTristate(True) self.select_all_checkbox.stateChanged.connect(self._on_select_all_checkbox_stateChanged) self.verticalLayout.addWidget(self.select_all_checkbox) self.content.toggled.connect(self._on_main_toggle) if self.checkitem1: self.checkitem1_checkbox = QCheckBox(self.checkitem1) self.checkitem1_checkbox.stateChanged.connect(self._on_select_checkitem1_checkbox_stateChanged) self.verticalLayout.addWidget(self.checkitem1_checkbox) if self.checkitem2: self.checkitem2_checkbox = QCheckBox(self.checkitem2) self.checkitem2_checkbox.stateChanged.connect(self._on_select_checkitem2_checkbox_stateChanged) self.verticalLayout.addWidget(self.checkitem2_checkbox) if not items: spacerItem = QSpacerItem(1, 1, QSizePolicy.Expanding, QSizePolicy.Expanding) self.verticalLayout.addItem(spacerItem) self._close_timer = None self._closein = closein - 1 if closein > 0: self.closein_label = QLabel("OK in %d sec..." 
% closein) self.closein_label.setAlignment(Qt.AlignRight) self.verticalLayout.addWidget(self.closein_label) self._close_timer = threading.Timer(1.0, self._on_close_timer) self._close_timer.start() self.buttonBox = QDialogButtonBox(self) self.buttonBox.setObjectName("buttonBox") self.buttonBox.setOrientation(Qt.Horizontal) self.buttonBox.setStandardButtons(buttons) self.buttonBox.accepted.connect(self.accept) self.buttonBox.rejected.connect(self.reject) self.verticalLayout.addWidget(self.buttonBox) if items: self.content.createFieldsFromValues(items, exclusive) if (select_if_single and len(items) == 1) or preselect_all: self.select_all_checkbox.setCheckState(Qt.Checked) if not items or len(items) < 7: self.filter_field.setVisible(False) self._geometry_name = store_geometry if store_geometry and nm.settings().store_geometry: settings = nm.settings().qsettings(nm.settings().CFG_GUI_FILE) settings.beginGroup(store_geometry) self.resize(settings.value("size", QSize(480, 320))) pos = settings.value("pos", QPoint(0, 0)) if pos.x() != 0 and pos.y() != 0: self.move(pos) settings.endGroup()
Creates an input dialog. @param items: a list with strings @type items: C{list()}
https://github.com/fkie/multimaster_fkie/blob/386ebf27f41bffdb1896bbcfdccb7c5290ac0eb4/fkie_node_manager/src/fkie_node_manager/select_dialog.py#L58-L165
from python_qt_binding.QtCore import Qt, Signal, QPoint, QSize try: from python_qt_binding.QtGui import QCheckBox, QDialog, QFrame, QDialogButtonBox, QLabel, QLineEdit, QScrollArea, QWidget from python_qt_binding.QtGui import QFormLayout, QHBoxLayout, QVBoxLayout, QSizePolicy, QSpacerItem except Exception: from python_qt_binding.QtWidgets import QCheckBox, QDialog, QFrame, QDialogButtonBox, QLabel, QLineEdit, QScrollArea, QWidget from python_qt_binding.QtWidgets import QFormLayout, QHBoxLayout, QVBoxLayout, QSizePolicy, QSpacerItem from python_qt_binding.QtGui import QPixmap import re import threading import fkie_node_manager as nm from fkie_node_manager_daemon.common import utf8 from fkie_node_manager.editor.line_edit import EnhancedLineEdit class SelectDialog(QDialog): MODAL_DIALOG = None
BSD 3-Clause New or Revised License
crpurcell/friendlyvri
Imports/util_tk.py
ScrolledTreeTab.get_indx_selected
python
def get_indx_selected(self): if self.rowSelected is None: return None else: return int(self.rowSelected)
Return the index of the last row selected.
https://github.com/crpurcell/friendlyvri/blob/d30a99622742e06fe8b8b767b170c7353c281a82/Imports/util_tk.py#L186-L192
try: import Tkinter as tk import ttk import tkFont except Exception: import tkinter as tk from tkinter import ttk import tkinter.font as tkFont import numpy as np from matplotlib.figure import Figure from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg class ScrolledTreeTab(ttk.Frame): def __init__(self, parent, virtEvent="<<tab_row_selected>>", strPad=10, *args, **kw): ttk.Frame.__init__(self, parent, *args, **kw) self.parent = parent self.rowSelected = None self.textSelected = None self.virtEvent = virtEvent self.strPad = strPad self.tree = ttk.Treeview(self, show="headings") vsb = ttk.Scrollbar(self, orient="vertical", command=self.tree.yview) hsb = ttk.Scrollbar(self, orient="horizontal", command=self.tree.xview) self.tree.configure(yscrollcommand=vsb.set, xscrollcommand=hsb.set) self.tree.grid(column=0, row=0, sticky="NSWE") vsb.grid(column=1, row=0, sticky="NS") hsb.grid(column=0, row=1, sticky="WE") self.columnconfigure(0, weight=1) self.columnconfigure(1, weight=0) self.rowconfigure(0, weight=1) self.tree.configure(selectmode="browse") self.tree.bind("<<TreeviewSelect>>", self._on_row_select) def _on_row_select(self, event=None): item = event.widget.selection() if not item=="": indx = event.widget.item(item, "text") self.rowSelected = int(indx) self.textSelected = event.widget.item(item, "value") self.event_generate(self.virtEvent) def _sortby(self, tree, col, descending): data = [(tree.set(child, col), child) for child in tree.get_children('')] data = self._change_numeric_onestep(data) data.sort(reverse=descending) for i, item in enumerate(data): tree.move(item[1], '', i) tree.heading(col, command=lambda col=col: self._sortby(tree, col, int(not descending))) def _change_numeric_onestep(self, data): newData = [] try: for child, col in data: if child=="None": child = "-inf" newData.append((float(child), col)) return newData except Exception: return data def name_columns(self, colNames): self.tree['columns'] = colNames for col in colNames: self.tree.heading(col, text=col, command=lambda c=col: self._sortby(self.tree, c, 0)) strWidth = tkFont.Font().measure(col.title()) self.tree.column(col, width=strWidth + self.strPad) self.tree.column(col, minwidth=strWidth + self.strPad) def insert_rows(self, rows, colNames=None): if colNames is None: colNames = ["Row "+ str(x+1) for x in range(len(rows[0]))] if len(self.tree['columns'])==0: self.tree['columns'] = colNames for col in colNames: self.tree.heading(col, text=col, command=lambda c=col: self._sortby(self.tree, c, 0)) strWidth = tkFont.Font().measure(col.title()) self.tree.column(col, width=strWidth + self.strPad) self.tree.column(col, minwidth=strWidth + self.strPad) rowIndx = 0 for row in rows: row = [str(x) for x in row] self.tree.insert('', 'end', values=row, text=str(rowIndx)) rowIndx += 1 for i, val in enumerate(row): strWidth = tkFont.Font().measure(val.title()) if self.tree.column(colNames[i], width=None)< (strWidth + self.strPad): self.tree.column(colNames[i], width=strWidth + self.strPad) self.tree.column(colNames[i], minwidth=strWidth + self.strPad)
MIT License
directgroup/direct
direct/functionals/challenges.py
fastmri_nmse
python
def fastmri_nmse(gt, pred): gt = _to_numpy(gt)[:, 0, ...] pred = _to_numpy(pred)[:, 0, ...] out = np.linalg.norm(gt - pred) ** 2 / np.linalg.norm(gt) ** 2 return torch.from_numpy(np.array(out)).float()
Compute the Normalized Mean Square Error (NMSE) metric, compatible with the FastMRI challenge.
https://github.com/directgroup/direct/blob/961989bfac0177988de04e8a3ff563db850575e2/direct/functionals/challenges.py#L47-L52
import numpy as np import torch __all__ = ( "fastmri_ssim", "fastmri_psnr", "fastmri_nmse", "calgary_campinas_ssim", "calgary_campinas_psnr", "calgary_campinas_vif", ) def _to_numpy(tensor): if isinstance(tensor, np.ndarray): return tensor return tensor.cpu().numpy() def fastmri_ssim(gt, target): from skimage.metrics import structural_similarity gt = _to_numpy(gt)[:, 0, ...] target = _to_numpy(target)[:, 0, ...] out = structural_similarity( gt.transpose(1, 2, 0), target.transpose(1, 2, 0), multichannel=True, data_range=gt.max(), ) return torch.from_numpy(np.array(out)).float() def fastmri_psnr(gt, pred): gt = _to_numpy(gt)[:, 0, ...] pred = _to_numpy(pred)[:, 0, ...] from skimage.metrics import peak_signal_noise_ratio as psnr out = psnr(image_true=gt, image_test=pred, data_range=gt.max()) return torch.from_numpy(np.array(out)).float()
Apache License 2.0
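A small usage sketch, assuming the `direct` package is importable and following the `(batch, 1, height, width)` layout the function slices with `[:, 0, ...]`:

```python
import numpy as np
import torch
from direct.functionals.challenges import fastmri_nmse

gt = torch.rand(2, 1, 32, 32)                 # hypothetical ground-truth batch
pred = gt + 0.01 * torch.randn_like(gt)       # hypothetical reconstruction

nmse = fastmri_nmse(gt, pred)                 # scalar float32 tensor

# The same quantity written out explicitly:
g, p = gt[:, 0].numpy(), pred[:, 0].numpy()
manual = np.linalg.norm(g - p) ** 2 / np.linalg.norm(g) ** 2
assert torch.isclose(nmse, torch.tensor(manual, dtype=torch.float32))
```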
gamer-os/steam-buddy
tests/test_downloader.py
empty_data
python
def empty_data(fs): fs.create_dir(os.path.expanduser('~')) yield fs
Mock an empty home directory as it should be on the first run
https://github.com/gamer-os/steam-buddy/blob/2df83290d32c03ce71f694166570a2247684e4cd/tests/test_downloader.py#L8-L11
import os import json import pytest from chimera_app.data import Downloader @pytest.fixture
MIT License
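A hypothetical test built on the fixture above; `fs` is the fake-filesystem fixture from `pyfakefs`, so the home directory exists only in memory and starts out empty:

```python
import os

def test_home_starts_empty(empty_data):
    home = os.path.expanduser('~')
    assert os.path.isdir(home)        # created by the fixture in the fake filesystem
    assert os.listdir(home) == []     # nothing has been downloaded yet
```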
seaglass-project/seaglass
common/scan.py
Bcch_Measurement.document
python
def document(self): doc = {} doc['arfcn'] = int(self.arfcn) doc['rx_lev'] = int(self.rx_lev) doc['measurement_blob'] = self.blob bcch = {} bcch['num_channels'] = int(self.num_channels) bcch['num_arfcn'] = int(self.num_arfcn) format_channels = [] format_arfcns = [] for channel in self.channels: format_channels.append(int(channel)) for arfcn in self.arfcns: format_arfcns.append(int(arfcn)) bcch['channels'] = format_channels bcch['arfcns'] = format_arfcns for key in self.data: if self.data[key] is None: bcch[key] = self.data[key] elif key == 'cell_status': bcch[key] = self.data[key] elif key == 'ber': bcch[key] = float(self.data[key]) else: bcch[key] = int(self.data[key]) doc['bcch'] = bcch return doc
This builds a nicely formatted document that can be inserted into Mongo
https://github.com/seaglass-project/seaglass/blob/04ae18807d188b211167acdb329050a173cba0ba/common/scan.py#L332-L382
import copy GPS_FIELDS = ['mode', 'time', 'ept', 'lat', 'lon', 'alt', 'epx', 'epy', 'cpv', 'track', 'speed', 'climb', 'epd', 'eps', 'epc'] def scan_factory(gsm, gps_before, gps_after, sensor_name=None, high_quality=True): gpsb = Gps_Scan(gps_before) gpsa = Gps_Scan(gps_after) gsm_scan = Gsm_Scan(gsm['scan_blob']) gsm_scan.set_error(gsm['error']) gsm_scan.set_jammed(gsm['jammed']) if 'freq_low' in gsm and 'freq_high' in gsm: gsm_scan.set_freq_range(gsm['freq_low'], gsm['freq_high']) if 'antenna' in gsm: gsm_scan.set_antenna(gsm['antenna']) for raw_meas in gsm['measurements']: if 'bcch' in raw_meas: meas = Bcch_Measurement(raw_meas['measurement_blob']) raw_bcch = raw_meas['bcch'] meas.set_arfcn_lst(raw_bcch['arfcns'], raw_bcch['num_arfcn']) meas.set_channel_lst(raw_bcch['channels'], raw_bcch['num_channels']) bcch_cpy = copy.deepcopy(raw_bcch) del bcch_cpy['arfcns'] del bcch_cpy['num_arfcn'] del bcch_cpy['channels'] del bcch_cpy['num_channels'] meas.set_bcch_data(bcch_cpy) else: meas = Gsm_Measurement(raw_meas['measurement_blob']) meas.set_arfcn(raw_meas['arfcn']) meas.set_rx_lev(raw_meas['rx_lev']) gsm_scan.add_measurement(meas) return Scan(gsm_scan, gpsb, gpsa, sensor_name, high_quality) class Scan(): def __init__(self, gsm, gpsb, gpsa, sensor_name=None, high_quality=True): self.gsm = gsm self.gps_before = gpsb self.gps_after = gpsa self.sensor_name = sensor_name self.high_quality = high_quality def document(self): doc = {} doc['gsm'] = self.gsm.document() doc['gps_before'] = self.gps_before.document() doc['gps_after'] = self.gps_after.document() doc['sensor_name'] = self.sensor_name doc['high_quality'] = self.high_quality return doc def get_gsm(self): return self.gsm def get_gps_before(self): return self.gps_before def get_gps_after(self): return self.gps_after def get_sensor_name(self): return self.sensor_name def get_high_quality(self): return self.high_quality class Gps_Scan(): def __init__(self, raw_gps): cur_gps_fields = list(raw_gps.keys()) fields = list(set(cur_gps_fields).intersection(GPS_FIELDS)) self.gps_data = {gps_field: raw_gps[gps_field] for gps_field in fields} def get_gps_data(self): return self.gps_data def get_time(self): try: time = self.gps_data.get('time', None) if time is not None: return str(time) else: return None except: return None def get_mode(self): try: mode = self.gps_data.get('mode', None) if mode is not None: return str(mode) else: return None except: return None def document(self): return self.gps_data class Gsm_Scan(): def __init__(self, blob, freq_low = None, freq_high = None): self.blob = blob self.freq_low = freq_low self.freq_high = freq_high self.error = 0 self.gsm_measurements = [] self.jammed = 0 def set_freq_range(self, freq_low, freq_high): self.freq_low = freq_low self.freq_high = freq_high def set_error(self, error): self.error = error def set_jammed(self, jammed): self.jammed = jammed def get_freq_range(self): return (self.freq_low, self.freq_high) def get_error(self): return self.error def get_jammed(self): return self.jammed def add_measurement(self, measurement): self.gsm_measurements.append(measurement) def measurement_cursor(self): for measurement in self.gsm_measurements: yield measurement def __str__(self): s = "" s += "Error: " + str(self.error) + "\n\n" i = 0 for i in range(len(self.gsm_measurements)): s += "===========================================\n" s += "Measurement " + str(i+1) + ":\n" s += "===========================================\n" s += str(self.gsm_measurements[i]) + "\n" i = i + 1 return s def document(self): doc = {} 
doc['scan_blob'] = self.blob if self.error == 0: doc['error'] = False else: doc['error'] = True if self.jammed == 1: doc['jammed'] = True else: doc['jammed'] = False if self.freq_low is not None: doc['freq_low'] = self.freq_low if self.freq_high is not None: doc['freq_high'] = self.freq_high measurements = [] for measurement in self.gsm_measurements: measurements.append(measurement.document()) doc['measurements'] = measurements return doc class Gsm_Measurement(): def __init__(self, gsm_blob): self.arfcn = None self.rx_lev = None self.blob = gsm_blob def set_arfcn(self, arfcn): self.arfcn = arfcn def set_rx_lev(self, rx_lev): self.rx_lev = rx_lev def get_arfcn(self): return self.arfcn def get_rx_lev(self): return self.rx_lev def __str__(self): s = "" s += "-------------RAW BEGIN-----------------\n" s += self.blob + "\n" s += "--------------RAW END-----------------\n" s += "arfcn: " + self.arfcn + "\n" s += "rx_lev: " + self.rx_lev + "\n" return s def document(self): doc = {} doc['arfcn'] = int(self.arfcn) doc['rx_lev'] = int(self.rx_lev) doc['measurement_blob'] = self.blob return doc class Bcch_Measurement(Gsm_Measurement): def __init__(self, gsm_blob): super().__init__(gsm_blob) self.data = {} self.num_channels = None self.channels = [] self.num_arfcn = None self.arfcns = [] def set_arfcn_lst(self, arfcn_lst, num_arfcn): self.arfcns = arfcn_lst self.num_arfcn = num_arfcn def set_channel_lst(self, channel_lst, num_channels): self.channels = channel_lst self.num_channels = num_channels def set_bcch_data(self, data): self.data = data def get_arfcn_lst(self): return (self.arfcns, self.num_arfcn) def get_channel_lst(self): return (self.channels, self.num_channels) def get_data(self): return self.data def __str__(self): s = "" s += "-------------RAW BEGIN-----------------\n" s += self.blob + "\n" s += "--------------RAW END-----------------\n" s += "arfcn: " + self.arfcn + "\n" s += "rx_lev: " + self.rx_lev + "\n" s += "num_channels: " + self.num_channels + "\n" s += "channels: " + str(self.channels) + "\n" s += "num_arfcn: " + self.num_arfcn + "\n" s += "arfcns: " + str(self.arfcns) + "\n" for key in self.data: s += key + ": " + str(self.data[key]) + "\n" return s
BSD 3-Clause New or Revised License
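A usage sketch with made-up measurement values, assuming `common/scan.py` is importable as `scan`; it exercises only the setters visible in the surrounding context and the `document()` formatting above:

```python
from scan import Bcch_Measurement   # common/scan.py in the repository above

meas = Bcch_Measurement("raw decoder output ...")     # placeholder blob
meas.set_arfcn("42")
meas.set_rx_lev("-70")
meas.set_arfcn_lst(["42", "45"], 2)
meas.set_channel_lst(["1", "3"], 2)
meas.set_bcch_data({"cell_status": "CELL_SUITABLE", "ber": "0.57", "mcc": "310"})

doc = meas.document()   # nested dict: ints/floats coerced, ready for a Mongo insert
print(doc["bcch"]["ber"], doc["rx_lev"])   # 0.57 -70
```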
henriquemiranda/yambopy
yambopy/io/factories.py
PhPhononTasks
python
def PhPhononTasks(structure,kpoints,ecut,qpoints=None): qe_input = PwIn.from_structure_dict(structure,kpoints=kpoints,ecut=ecut) qe_scf_task = PwTask.from_input(qe_input) if qpoints is None: qpoints = qe_input.kpoints ph_input = PhIn.from_qpoints(qpoints) ph_task = PhTask.from_scf_task([ph_input,qe_scf_task],dependencies=qe_scf_task) matdyn_task = DynmatTask.from_phonon_task(ph_task,dependencies=ph_task) return qe_scf_task, ph_task, matdyn_task
Return a ScfTask, a PhTask and a Matdyn task
https://github.com/henriquemiranda/yambopy/blob/41b860c47e95a0d65be2a138b0043278508caee9/yambopy/io/factories.py#L425-L442
from qepy.pw import PwIn from qepy.ph import PhIn from qepy.pwxml import PwXML from qepy.lattice import * from qepy.matdyn import Matdyn from qepy import qepyenv from yambopy.io.inputfile import YamboIn from yambopy.tools.duck import isiter from yambopy.flow import PwTask, PhTask, P2yTask, YamboTask, DynmatTask, YambopyFlow, AbinitTask, E2yTask __all__ = [ , , , , , , , ] class FiniteDifferencesPhononFlow(): def __init__(self,structure,phonon_modes): self.structure = structure if not isinstance(phonon_modes,Matdyn): raise ValueError('phonon_modes must be an instance of Matdyn') self.phonon_modes = phonon_modes def get_tasks(self,path,kpoints,ecut,nscf_bands,nscf_kpoints=None, imodes_list=None,displacements=[0.01,-0.01],iqpoint=0,**kwargs): if imodes_list is None: imodes_list = list(range(self.phonon_modes.nmodes)) if not isiter(displacements): displacements = [displacements] generator = kwargs.pop("generator",PwNscfYamboIPChiTasks) pwin = PwIn.from_structure_dict(self.structure,kpoints=kpoints,ecut=ecut) tasks = [] tasks.extend(generator(pwin.get_structure(),kpoints,ecut,nscf_bands,nscf_kpoints=nscf_kpoints,**kwargs)) reference = [dict(imode=None,displacement=0)] for imode in imodes_list: cart_mode = self.phonon_modes.modes[iqpoint,imode] for displacement in displacements: input_mock = pwin.get_displaced(cart_mode, displacement=displacement) displaced_structure = input_mock.get_structure() new_tasks = generator(displaced_structure,kpoints,ecut, nscf_bands,nscf_kpoints=nscf_kpoints,**kwargs) tasks.extend(new_tasks) reference.append([dict(imode=None,displacement=displacement)]) self.reference = reference return tasks def get_flow(self,path,kpoints,ecut,nscf_bands,nscf_kpoints=None,imodes_list=None,**kwargs): tasks = self.get_tasks(path=path,kpoints=kpoints,ecut=ecut,nscf_bands=nscf_bands, nscf_kpoints=nscf_kpoints,imodes_list=imodes_list,**kwargs) self.yambo_flow = YambopyFlow.from_tasks(path,tasks) return self.yambo_flow def get_dchi(self): raise NotImplementedError('TODO') pass def plot_ax(self,what): raise NotImplementedError('TODO') pass @property def path(self): return self.yambo_flow.path class KpointsConvergenceFlow(): def __init__(self,structure): self.structure = structure def get_tasks(self,scf_kpoints,nscf_kpoints_list,ecut,nscf_bands,**kwargs): generator = kwargs.pop("generator",PwNscfYamboIPChiTasks) qe_input = PwIn.from_structure_dict(self.structure,kpoints=scf_kpoints,ecut=ecut) qe_scf_task = PwTask.from_input(qe_input) tasks = [qe_scf_task] for nscf_kpoints in nscf_kpoints_list: new_tasks = generator(structure=self.structure,kpoints=scf_kpoints, nscf_kpoints=nscf_kpoints, ecut=ecut,nscf_bands=nscf_bands,**kwargs) tasks.extend(new_tasks) return tasks def get_flow(self,path,scf_kpoints,nscf_kpoints_list,ecut,nscf_bands,**kwargs): tasks = self.get_tasks(scf_kpoints=scf_kpoints,ecut=ecut,nscf_bands=nscf_bands, nscf_kpoints_list=nscf_kpoints_list,**kwargs) self.yambo_flow = YambopyFlow.from_tasks(path,tasks,**kwargs) return self.yambo_flow class BandsConvergenceFlow(): def __init__(self,structure): self.structure = structure def get_tasks(self,scf_kpoints,ecut,nscf_kpoints,bands_list,**kwargs): generator = kwargs.pop("generator",YamboIPChiTask) conv_thr = kwargs.pop("conv_thr",qepyenv.CONV_THR) nscf_bands = max(bands_list) tasks = PwNscfTasks(self.structure,kpoints=scf_kpoints,ecut=ecut, nscf_bands=nscf_bands,nscf_kpoints=nscf_kpoints,conv_thr=conv_thr) qe_scf_task, qe_nscf_task, p2y_task = tasks tasks = list(tasks) link_task = p2y_task new_task = p2y_task for bands in 
sorted(bands_list,reverse=True): new_task = generator(link_task,bands=bands,dependencies=new_task,**kwargs) tasks.append(new_task) link_task = [p2y_task,new_task] return tasks def get_flow(self,path,scf_kpoints,ecut,nscf_kpoints,bands_list,**kwargs): tasks = self.get_tasks(scf_kpoints=scf_kpoints,ecut=ecut,nscf_kpoints=nscf_kpoints, bands_list=bands_list,**kwargs) self.yambo_flow = YambopyFlow.from_tasks(path,tasks,**kwargs) return self.yambo_flow def copy_pp(self): import os import glob import shutil from netCDF4 import Dataset nospin_task = self.get_vars('nospin_task') spin_bands = self.get_vars('spin_bands') src = os.path.join(nospin_task.path,'run') dst = os.path.join(self.path,'run') if not os.path.isdir(dst): os.mkdir(dst) ppfiles = glob.glob(os.path.join(src,'ndb.pp*')) for ppfile_path in ppfiles: ppfile = os.path.basename(ppfile_path) shutil.copy(ppfile_path,os.path.join(dst,ppfile)) main_pp = os.path.join(dst,'ndb.pp') with Dataset(main_pp,'r+') as dst_db: dst_db.variables['SPIN_VARS'][1]=2 dst_db.variables['X_PARS_1'][2]=spin_bands def update_cell_and_positions(self): from qepy import PwXML pwtask = self.get_vars("pwtask") prefix = pwtask.pwinput.prefix pwxml = PwXML(prefix,path=pwtask.path) self.pwinput.update_structure_from_xml(pwxml) def get_scissor(self): import os from yambopy.dbs.qpdb import YamboQPDB qp_task = self.get_vars('qp_task') valence = self.get_vars('valence') qp_path = os.path.join(qp_task.path,'run') qpdb = YamboQPDB.from_db(folder=qp_path) scissor = qpdb.get_scissor(valence,verbose=0)[:3] self.yamboin_dict['KfnQP_E'] = list(scissor) class SpinOrbitFlow(): def __init__(self,structure,structure_spin=None): self.structure_nospin = structure if structure_spin is None: self.structure_spin = structure def get_tasks(self,scf_kpoints,ecut,nscf_kpoints,chi_bands,spin_bands,**kwargs): tasks=[] conv_thr = kwargs.pop("conv_thr",qepyenv.CONV_THR) yamboin_default_dict = dict(BndsRnXp=[1,chi_bands], NGsBlkXp=[1,'Ry'], EXXRLvcs=[10,'Ry'], QPkrange=[1,1,1,spin_bands], GbndRnge=[1,spin_bands]) yamboin_dict = kwargs.pop("yamboin_dict",yamboin_default_dict) generator = kwargs.pop("generator",YamboQPTask) pp_runlevel = kwargs.pop("pp_runlevel",'-p p -V all') qp_runlevel = kwargs.pop("qp_runlevel",'-p p -g n -V all') spin_runlevel = kwargs.pop("spin_runlevel",qp_runlevel) new_tasks = PwNscfTasks(self.structure_nospin,kpoints=scf_kpoints,ecut=ecut, nscf_bands=chi_bands,nscf_kpoints=nscf_kpoints,conv_thr=conv_thr) qe_scf_task, qe_nscf_task, p2y_task = new_tasks nospin_task = generator(p2y_task,runlevel=pp_runlevel,yamboin_dict=yamboin_dict,dependencies=p2y_task,**kwargs) tasks.extend( [qe_scf_task, qe_nscf_task, p2y_task, nospin_task] ) new_tasks = PwNscfTasks(self.structure_spin,kpoints=scf_kpoints,ecut=ecut, nscf_bands=spin_bands,nscf_kpoints=nscf_kpoints,conv_thr=conv_thr) qe_scf_task, qe_nscf_task, p2y_task = new_tasks qe_scf_task.get_instances_from_inputs(PwIn)[0].set_spinorbit() qe_nscf_task.get_instances_from_inputs(PwIn)[0].set_spinorbit() spin_task = generator(p2y_task,runlevel=spin_runlevel,yamboin_dict=yamboin_dict,dependencies=p2y_task,**kwargs) tasks.extend( [qe_scf_task, qe_nscf_task, p2y_task, spin_task] ) spin_task.set_vars('spin_bands',spin_bands) spin_task.set_vars('nospin_task',nospin_task) spin_task.set_code('initialize',copy_pp) return tasks def get_flow(self,path,scf_kpoints,ecut,nscf_kpoints,chi_bands,spin_bands,**kwargs): tasks = self.get_tasks(scf_kpoints=scf_kpoints,ecut=ecut,nscf_kpoints=nscf_kpoints, chi_bands=chi_bands,spin_bands=spin_bands,**kwargs) 
self.yambo_flow = YambopyFlow.from_tasks(path,tasks,**kwargs) return self.yambo_flow def PwNscfYamboIPChiTasks(structure,kpoints,ecut,nscf_bands, yambo_runlevel='-o c -V all',nscf_kpoints=None,**kwargs): conv_thr = kwargs.pop("conv_thr",qepyenv.CONV_THR) tmp_tasks = PwNscfTasks(structure,kpoints,ecut,nscf_bands,nscf_kpoints,conv_thr=conv_thr) qe_scf_task,qe_nscf_task,p2y_task = tmp_tasks yambo_task = YamboIPChiTask(p2y_task,yambo_runlevel=yambo_runlevel,**kwargs) return qe_scf_task,qe_nscf_task,p2y_task,yambo_task def YamboIPChiTask(p2y_task,**kwargs): yambo_ip_default_dict = dict(QpntsRXd=[1,1], ETStpsXd=1000) yambo_ip_dict = kwargs.pop('yambo_ip_dict',yambo_ip_default_dict) yambo_runlevel = kwargs.pop('yambo_runlevel','-o c -V all') bands = kwargs.pop('bands',None) dependencies = kwargs.pop('dependencies',p2y_task) if bands: yambo_ip_dict['BndsRnXd'] = [1,bands] yambo_task = YamboTask.from_runlevel(p2y_task,yambo_runlevel,yambo_ip_dict, dependencies=dependencies) return yambo_task def YamboQPBSETasks(p2y_task,qp_dict,bse_dict,qp_runlevel='-p p -g n -V all', bse_runlevel='-p p -k sex -y d -V all',valence=False,**kwargs): import copy dependencies = kwargs.pop('dependencies',p2y_task) inputs = [p2y_task] additional_inputs = kwargs.pop('additional_inputs',None) if additional_inputs: if not isiter(additional_inputs): additional_inputs = [additional_inputs] inputs.extend(additional_inputs) qp_task = YamboTask.from_runlevel(inputs,qp_runlevel,qp_dict,dependencies=dependencies) bse_dict['KfnQPdb']="E < run/ndb.QP" qp_bse_task = YamboTask.from_runlevel([p2y_task,qp_task],bse_runlevel,bse_dict, dependencies=qp_task) if valence: bse_dict = copy.deepcopy(bse_dict) bse_dict.pop('KfnQPdb') scissor_bse_task = YamboTask.from_runlevel([p2y_task,qp_task],bse_runlevel,bse_dict, dependencies=qp_task) scissor_bse_task.set_vars('qp_task',qp_task) scissor_bse_task.set_vars('valence',valence) scissor_bse_task.set_code('initialize',get_scissor) return qp_task, qp_bse_task, scissor_bse_task return qp_task, qp_bse_task def YamboQPTask(p2y_task,yamboin_dict,runlevel='-p p -g n -V all',**kwargs): dependencies = kwargs.pop('dependencies',p2y_task) qp_task = YamboTask.from_runlevel(p2y_task,runlevel,yamboin_dict,dependencies=p2y_task) return qp_task
BSD 3-Clause New or Revised License
globocom/globonetworkapi-client-python
networkapiclient/ApiEnvironmentVip.py
ApiEnvironmentVip.__init__
python
def __init__(self, networkapi_url, user, password, user_ldap=None): super(ApiEnvironmentVip, self).__init__( networkapi_url, user, password, user_ldap )
Class constructor receives parameters to connect to the networkAPI. :param networkapi_url: URL to access the network API. :param user: User for authentication. :param password: Password for authentication. :param user_ldap: LDAP user for authentication (optional).
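A minimal usage sketch for this constructor (not part of the original record): the endpoint URL and credentials below are placeholders, and the import path follows the file layout shown in this record.

    from networkapiclient.ApiEnvironmentVip import ApiEnvironmentVip

    # Placeholder endpoint and credentials -- replace with real values.
    client = ApiEnvironmentVip(
        'http://networkapi.example.com/',
        'my_user',
        'my_password',
    )
    # Requests issued through `client` now carry the configured credentials.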
https://github.com/globocom/globonetworkapi-client-python/blob/08dc24c54ee3cd6cdcca1fb33fb4796db8118e6f/networkapiclient/ApiEnvironmentVip.py#L22-L34
from networkapiclient.ApiGenericClient import ApiGenericClient from networkapiclient.utils import build_uri_with_ids class ApiEnvironmentVip(ApiGenericClient):
Apache License 2.0
azure/azure-devops-cli-extension
azure-devops/azext_devops/devops_sdk/v5_1/task/task_client.py
TaskClient.delete_timeline
python
def delete_timeline(self, scope_identifier, hub_name, plan_id, timeline_id): route_values = {} if scope_identifier is not None: route_values['scopeIdentifier'] = self._serialize.url('scope_identifier', scope_identifier, 'str') if hub_name is not None: route_values['hubName'] = self._serialize.url('hub_name', hub_name, 'str') if plan_id is not None: route_values['planId'] = self._serialize.url('plan_id', plan_id, 'str') if timeline_id is not None: route_values['timelineId'] = self._serialize.url('timeline_id', timeline_id, 'str') self._send(http_method='DELETE', location_id='83597576-cc2c-453c-bea6-2882ae6a1653', version='5.1', route_values=route_values)
DeleteTimeline. :param str scope_identifier: The project GUID to scope the request :param str hub_name: The name of the server hub: "build" for the Build server or "rm" for the Release Management server :param str plan_id: :param str timeline_id:
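A hedged usage sketch: the organization URL, personal access token, and GUIDs are placeholders, `BasicAuthentication` from msrest is one common way to build the `creds` object rather than something confirmed by this record, and the import path is inferred from the file layout above.

    from msrest.authentication import BasicAuthentication
    from azext_devops.devops_sdk.v5_1.task.task_client import TaskClient

    # Placeholder organization URL and PAT-based credentials.
    credentials = BasicAuthentication('', 'my-personal-access-token')
    client = TaskClient(base_url='https://dev.azure.com/my-org', creds=credentials)

    # Delete a timeline from a build orchestration plan (all IDs are placeholders).
    client.delete_timeline(
        scope_identifier='00000000-0000-0000-0000-000000000000',  # project GUID
        hub_name='build',
        plan_id='11111111-1111-1111-1111-111111111111',
        timeline_id='22222222-2222-2222-2222-222222222222',
    )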
https://github.com/azure/azure-devops-cli-extension/blob/5f33f7d81a9c2d2990044fbd9ffa6b535cbda528/azure-devops/azext_devops/devops_sdk/v5_1/task/task_client.py#L376-L395
 from msrest import Serializer, Deserializer from ...client import Client from . import models class TaskClient(Client): def __init__(self, base_url=None, creds=None): super(TaskClient, self).__init__(base_url, creds) client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} self._serialize = Serializer(client_models) self._deserialize = Deserializer(client_models) resource_area_identifier = None def get_plan_attachments(self, scope_identifier, hub_name, plan_id, type): route_values = {} if scope_identifier is not None: route_values['scopeIdentifier'] = self._serialize.url('scope_identifier', scope_identifier, 'str') if hub_name is not None: route_values['hubName'] = self._serialize.url('hub_name', hub_name, 'str') if plan_id is not None: route_values['planId'] = self._serialize.url('plan_id', plan_id, 'str') if type is not None: route_values['type'] = self._serialize.url('type', type, 'str') response = self._send(http_method='GET', location_id='eb55e5d6-2f30-4295-b5ed-38da50b1fc52', version='5.1-preview.1', route_values=route_values) return self._deserialize('[TaskAttachment]', self._unwrap_collection(response)) def create_attachment(self, upload_stream, scope_identifier, hub_name, plan_id, timeline_id, record_id, type, name, **kwargs): route_values = {} if scope_identifier is not None: route_values['scopeIdentifier'] = self._serialize.url('scope_identifier', scope_identifier, 'str') if hub_name is not None: route_values['hubName'] = self._serialize.url('hub_name', hub_name, 'str') if plan_id is not None: route_values['planId'] = self._serialize.url('plan_id', plan_id, 'str') if timeline_id is not None: route_values['timelineId'] = self._serialize.url('timeline_id', timeline_id, 'str') if record_id is not None: route_values['recordId'] = self._serialize.url('record_id', record_id, 'str') if type is not None: route_values['type'] = self._serialize.url('type', type, 'str') if name is not None: route_values['name'] = self._serialize.url('name', name, 'str') if "callback" in kwargs: callback = kwargs["callback"] else: callback = None content = self._client.stream_upload(upload_stream, callback=callback) response = self._send(http_method='PUT', location_id='7898f959-9cdf-4096-b29e-7f293031629e', version='5.1-preview.1', route_values=route_values, content=content, media_type='application/octet-stream') return self._deserialize('TaskAttachment', response) def get_attachment(self, scope_identifier, hub_name, plan_id, timeline_id, record_id, type, name): route_values = {} if scope_identifier is not None: route_values['scopeIdentifier'] = self._serialize.url('scope_identifier', scope_identifier, 'str') if hub_name is not None: route_values['hubName'] = self._serialize.url('hub_name', hub_name, 'str') if plan_id is not None: route_values['planId'] = self._serialize.url('plan_id', plan_id, 'str') if timeline_id is not None: route_values['timelineId'] = self._serialize.url('timeline_id', timeline_id, 'str') if record_id is not None: route_values['recordId'] = self._serialize.url('record_id', record_id, 'str') if type is not None: route_values['type'] = self._serialize.url('type', type, 'str') if name is not None: route_values['name'] = self._serialize.url('name', name, 'str') response = self._send(http_method='GET', location_id='7898f959-9cdf-4096-b29e-7f293031629e', version='5.1-preview.1', route_values=route_values) return self._deserialize('TaskAttachment', response) def get_attachment_content(self, scope_identifier, hub_name, plan_id, timeline_id, record_id, type, name, 
**kwargs): route_values = {} if scope_identifier is not None: route_values['scopeIdentifier'] = self._serialize.url('scope_identifier', scope_identifier, 'str') if hub_name is not None: route_values['hubName'] = self._serialize.url('hub_name', hub_name, 'str') if plan_id is not None: route_values['planId'] = self._serialize.url('plan_id', plan_id, 'str') if timeline_id is not None: route_values['timelineId'] = self._serialize.url('timeline_id', timeline_id, 'str') if record_id is not None: route_values['recordId'] = self._serialize.url('record_id', record_id, 'str') if type is not None: route_values['type'] = self._serialize.url('type', type, 'str') if name is not None: route_values['name'] = self._serialize.url('name', name, 'str') response = self._send(http_method='GET', location_id='7898f959-9cdf-4096-b29e-7f293031629e', version='5.1-preview.1', route_values=route_values, accept_media_type='application/octet-stream') if "callback" in kwargs: callback = kwargs["callback"] else: callback = None return self._client.stream_download(response, callback=callback) def get_attachments(self, scope_identifier, hub_name, plan_id, timeline_id, record_id, type): route_values = {} if scope_identifier is not None: route_values['scopeIdentifier'] = self._serialize.url('scope_identifier', scope_identifier, 'str') if hub_name is not None: route_values['hubName'] = self._serialize.url('hub_name', hub_name, 'str') if plan_id is not None: route_values['planId'] = self._serialize.url('plan_id', plan_id, 'str') if timeline_id is not None: route_values['timelineId'] = self._serialize.url('timeline_id', timeline_id, 'str') if record_id is not None: route_values['recordId'] = self._serialize.url('record_id', record_id, 'str') if type is not None: route_values['type'] = self._serialize.url('type', type, 'str') response = self._send(http_method='GET', location_id='7898f959-9cdf-4096-b29e-7f293031629e', version='5.1-preview.1', route_values=route_values) return self._deserialize('[TaskAttachment]', self._unwrap_collection(response)) def append_log_content(self, upload_stream, scope_identifier, hub_name, plan_id, log_id, **kwargs): route_values = {} if scope_identifier is not None: route_values['scopeIdentifier'] = self._serialize.url('scope_identifier', scope_identifier, 'str') if hub_name is not None: route_values['hubName'] = self._serialize.url('hub_name', hub_name, 'str') if plan_id is not None: route_values['planId'] = self._serialize.url('plan_id', plan_id, 'str') if log_id is not None: route_values['logId'] = self._serialize.url('log_id', log_id, 'int') if "callback" in kwargs: callback = kwargs["callback"] else: callback = None content = self._client.stream_upload(upload_stream, callback=callback) response = self._send(http_method='POST', location_id='46f5667d-263a-4684-91b1-dff7fdcf64e2', version='5.1', route_values=route_values, content=content, media_type='application/octet-stream') return self._deserialize('TaskLog', response) def create_log(self, log, scope_identifier, hub_name, plan_id): route_values = {} if scope_identifier is not None: route_values['scopeIdentifier'] = self._serialize.url('scope_identifier', scope_identifier, 'str') if hub_name is not None: route_values['hubName'] = self._serialize.url('hub_name', hub_name, 'str') if plan_id is not None: route_values['planId'] = self._serialize.url('plan_id', plan_id, 'str') content = self._serialize.body(log, 'TaskLog') response = self._send(http_method='POST', location_id='46f5667d-263a-4684-91b1-dff7fdcf64e2', version='5.1', 
route_values=route_values, content=content) return self._deserialize('TaskLog', response) def get_log(self, scope_identifier, hub_name, plan_id, log_id, start_line=None, end_line=None): route_values = {} if scope_identifier is not None: route_values['scopeIdentifier'] = self._serialize.url('scope_identifier', scope_identifier, 'str') if hub_name is not None: route_values['hubName'] = self._serialize.url('hub_name', hub_name, 'str') if plan_id is not None: route_values['planId'] = self._serialize.url('plan_id', plan_id, 'str') if log_id is not None: route_values['logId'] = self._serialize.url('log_id', log_id, 'int') query_parameters = {} if start_line is not None: query_parameters['startLine'] = self._serialize.query('start_line', start_line, 'long') if end_line is not None: query_parameters['endLine'] = self._serialize.query('end_line', end_line, 'long') response = self._send(http_method='GET', location_id='46f5667d-263a-4684-91b1-dff7fdcf64e2', version='5.1', route_values=route_values, query_parameters=query_parameters) return self._deserialize('[str]', self._unwrap_collection(response)) def get_logs(self, scope_identifier, hub_name, plan_id): route_values = {} if scope_identifier is not None: route_values['scopeIdentifier'] = self._serialize.url('scope_identifier', scope_identifier, 'str') if hub_name is not None: route_values['hubName'] = self._serialize.url('hub_name', hub_name, 'str') if plan_id is not None: route_values['planId'] = self._serialize.url('plan_id', plan_id, 'str') response = self._send(http_method='GET', location_id='46f5667d-263a-4684-91b1-dff7fdcf64e2', version='5.1', route_values=route_values) return self._deserialize('[TaskLog]', self._unwrap_collection(response)) def get_records(self, scope_identifier, hub_name, plan_id, timeline_id, change_id=None): route_values = {} if scope_identifier is not None: route_values['scopeIdentifier'] = self._serialize.url('scope_identifier', scope_identifier, 'str') if hub_name is not None: route_values['hubName'] = self._serialize.url('hub_name', hub_name, 'str') if plan_id is not None: route_values['planId'] = self._serialize.url('plan_id', plan_id, 'str') if timeline_id is not None: route_values['timelineId'] = self._serialize.url('timeline_id', timeline_id, 'str') query_parameters = {} if change_id is not None: query_parameters['changeId'] = self._serialize.query('change_id', change_id, 'int') response = self._send(http_method='GET', location_id='8893bc5b-35b2-4be7-83cb-99e683551db4', version='5.1', route_values=route_values, query_parameters=query_parameters) return self._deserialize('[TimelineRecord]', self._unwrap_collection(response)) def update_records(self, records, scope_identifier, hub_name, plan_id, timeline_id): route_values = {} if scope_identifier is not None: route_values['scopeIdentifier'] = self._serialize.url('scope_identifier', scope_identifier, 'str') if hub_name is not None: route_values['hubName'] = self._serialize.url('hub_name', hub_name, 'str') if plan_id is not None: route_values['planId'] = self._serialize.url('plan_id', plan_id, 'str') if timeline_id is not None: route_values['timelineId'] = self._serialize.url('timeline_id', timeline_id, 'str') content = self._serialize.body(records, 'VssJsonCollectionWrapper') response = self._send(http_method='PATCH', location_id='8893bc5b-35b2-4be7-83cb-99e683551db4', version='5.1', route_values=route_values, content=content) return self._deserialize('[TimelineRecord]', self._unwrap_collection(response)) def create_timeline(self, timeline, scope_identifier, 
hub_name, plan_id): route_values = {} if scope_identifier is not None: route_values['scopeIdentifier'] = self._serialize.url('scope_identifier', scope_identifier, 'str') if hub_name is not None: route_values['hubName'] = self._serialize.url('hub_name', hub_name, 'str') if plan_id is not None: route_values['planId'] = self._serialize.url('plan_id', plan_id, 'str') content = self._serialize.body(timeline, 'Timeline') response = self._send(http_method='POST', location_id='83597576-cc2c-453c-bea6-2882ae6a1653', version='5.1', route_values=route_values, content=content) return self._deserialize('Timeline', response)
MIT License
rwbfd/opencompetition
src/nlp/common/networks/modeling_auto.py
AutoModelForSequenceClassification.from_config
python
def from_config(cls, config): if isinstance(config, AlbertConfig): return AlbertForSequenceClassification(config) elif isinstance(config, CamembertConfig): return CamembertForSequenceClassification(config) elif isinstance(config, DistilBertConfig): return DistilBertForSequenceClassification(config) elif isinstance(config, XLMRobertaConfig): return XLMRobertaForSequenceClassification(config) elif isinstance(config, RobertaConfig): return RobertaForSequenceClassification(config) elif isinstance(config, BertConfig): return BertForSequenceClassification(config) elif isinstance(config, XLNetConfig): return XLNetForSequenceClassification(config) elif isinstance(config, XLMConfig): return XLMForSequenceClassification(config) raise ValueError("Unrecognized configuration class {}".format(config))
r""" Instantiates one of the base model classes of the library from a configuration. config: (`optional`) instance of a class derived from :class:`~transformers.PretrainedConfig`: The model class to instantiate is selected based on the configuration class: - isInstance of `distilbert` configuration class: DistilBertModel (DistilBERT model) - isInstance of `roberta` configuration class: RobertaModel (RoBERTa model) - isInstance of `bert` configuration class: BertModel (Bert model) - isInstance of `xlnet` configuration class: XLNetModel (XLNet model) - isInstance of `xlm` configuration class: XLMModel (XLM model) Examples:: config = BertConfig.from_pretrained('bert-base-uncased') # Download configuration from S3 and cache. model = AutoModelForSequenceClassification.from_config(config) # E.g. model was saved using `save_pretrained('./test/saved_model/')`
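A runnable sketch expanding the docstring's own example; the import path is an assumption based on the file layout of this record (adjust to however the package is installed), and the pretrained configuration is downloaded from the hub on first use.

    # Import path is an assumption; adjust to wherever this package lives.
    from src.nlp.common.networks.configuration_auto import BertConfig
    from src.nlp.common.networks.modeling_auto import AutoModelForSequenceClassification

    # Download the BERT configuration and cache it, then build an untrained
    # BertForSequenceClassification from that configuration.
    config = BertConfig.from_pretrained('bert-base-uncased')
    model = AutoModelForSequenceClassification.from_config(config)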
https://github.com/rwbfd/opencompetition/blob/5262fc5fa7efd7b483c1dc09cb7747dd75e37175/src/nlp/common/networks/modeling_auto.py#L554-L587
import logging from .configuration_auto import ( AlbertConfig, BertConfig, CamembertConfig, CTRLConfig, DistilBertConfig, GPT2Config, OpenAIGPTConfig, RobertaConfig, TransfoXLConfig, XLMConfig, XLMRobertaConfig, XLNetConfig, ) from .modeling_albert import ( ALBERT_PRETRAINED_MODEL_ARCHIVE_MAP, AlbertForMaskedLM, AlbertForQuestionAnswering, AlbertForSequenceClassification, AlbertModel, ) from .modeling_bert import ( BERT_PRETRAINED_MODEL_ARCHIVE_MAP, BertForMaskedLM, BertForQuestionAnswering, BertForSequenceClassification, BertForTokenClassification, BertModel, ) from .modeling_camembert import ( CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_MAP, CamembertForMaskedLM, CamembertForSequenceClassification, CamembertForTokenClassification, CamembertModel, ) from .modeling_ctrl import CTRL_PRETRAINED_MODEL_ARCHIVE_MAP, CTRLLMHeadModel, CTRLModel from .modeling_distilbert import ( DISTILBERT_PRETRAINED_MODEL_ARCHIVE_MAP, DistilBertForMaskedLM, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, DistilBertModel, ) from .modeling_gpt2 import GPT2_PRETRAINED_MODEL_ARCHIVE_MAP, GPT2LMHeadModel, GPT2Model from .modeling_openai import OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_MAP, OpenAIGPTLMHeadModel, OpenAIGPTModel from .modeling_roberta import ( ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP, RobertaForMaskedLM, RobertaForSequenceClassification, RobertaForTokenClassification, RobertaModel, ) from .modeling_t5 import T5_PRETRAINED_MODEL_ARCHIVE_MAP, T5Model, T5WithLMHeadModel from .modeling_transfo_xl import TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_MAP, TransfoXLLMHeadModel, TransfoXLModel from .modeling_xlm import ( XLM_PRETRAINED_MODEL_ARCHIVE_MAP, XLMForQuestionAnswering, XLMForSequenceClassification, XLMModel, XLMWithLMHeadModel, ) from .modeling_xlm_roberta import ( XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP, XLMRobertaForMaskedLM, XLMRobertaForSequenceClassification, XLMRobertaForTokenClassification, XLMRobertaModel, ) from .modeling_xlnet import ( XLNET_PRETRAINED_MODEL_ARCHIVE_MAP, XLNetForQuestionAnswering, XLNetForSequenceClassification, XLNetForTokenClassification, XLNetLMHeadModel, XLNetModel, ) logger = logging.getLogger(__name__) ALL_PRETRAINED_MODEL_ARCHIVE_MAP = dict( (key, value) for pretrained_map in [ BERT_PRETRAINED_MODEL_ARCHIVE_MAP, OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_MAP, TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_MAP, GPT2_PRETRAINED_MODEL_ARCHIVE_MAP, CTRL_PRETRAINED_MODEL_ARCHIVE_MAP, XLNET_PRETRAINED_MODEL_ARCHIVE_MAP, XLM_PRETRAINED_MODEL_ARCHIVE_MAP, ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP, DISTILBERT_PRETRAINED_MODEL_ARCHIVE_MAP, ALBERT_PRETRAINED_MODEL_ARCHIVE_MAP, CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_MAP, T5_PRETRAINED_MODEL_ARCHIVE_MAP, XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP, ] for key, value, in pretrained_map.items() ) class AutoModel(object): def __init__(self): raise EnvironmentError( "AutoModel is designed to be instantiated " "using the `AutoModel.from_pretrained(pretrained_model_name_or_path)` or " "`AutoModel.from_config(config)` methods." 
) @classmethod def from_config(cls, config): if isinstance(config, DistilBertConfig): return DistilBertModel(config) elif isinstance(config, RobertaConfig): return RobertaModel(config) elif isinstance(config, BertConfig): return BertModel(config) elif isinstance(config, OpenAIGPTConfig): return OpenAIGPTModel(config) elif isinstance(config, GPT2Config): return GPT2Model(config) elif isinstance(config, TransfoXLConfig): return TransfoXLModel(config) elif isinstance(config, XLNetConfig): return XLNetModel(config) elif isinstance(config, XLMConfig): return XLMModel(config) elif isinstance(config, CTRLConfig): return CTRLModel(config) elif isinstance(config, AlbertConfig): return AlbertModel(config) elif isinstance(config, CamembertConfig): return CamembertModel(config) elif isinstance(config, XLMRobertaConfig): return XLMRobertaModel(config) raise ValueError("Unrecognized configuration class {}".format(config)) @classmethod def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs): if "t5" in pretrained_model_name_or_path: return T5Model.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs) elif "distilbert" in pretrained_model_name_or_path: return DistilBertModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs) elif "albert" in pretrained_model_name_or_path: return AlbertModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs) elif "camembert" in pretrained_model_name_or_path: return CamembertModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs) elif "xlm-roberta" in pretrained_model_name_or_path: return XLMRobertaModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs) elif "roberta" in pretrained_model_name_or_path: return RobertaModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs) elif "bert" in pretrained_model_name_or_path: return BertModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs) elif "openai-gpt" in pretrained_model_name_or_path: return OpenAIGPTModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs) elif "gpt2" in pretrained_model_name_or_path: return GPT2Model.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs) elif "transfo-xl" in pretrained_model_name_or_path: return TransfoXLModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs) elif "xlnet" in pretrained_model_name_or_path: return XLNetModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs) elif "xlm" in pretrained_model_name_or_path: return XLMModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs) elif "ctrl" in pretrained_model_name_or_path: return CTRLModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs) raise ValueError( "Unrecognized model identifier in {}. Should contains one of " "'bert', 'openai-gpt', 'gpt2', 'transfo-xl', 'xlnet', " "'xlm-roberta', 'xlm', 'roberta, 'ctrl', 'distilbert', 'camembert', 'albert'".format( pretrained_model_name_or_path ) ) class AutoModelWithLMHead(object): def __init__(self): raise EnvironmentError( "AutoModelWithLMHead is designed to be instantiated " "using the `AutoModelWithLMHead.from_pretrained(pretrained_model_name_or_path)` or " "`AutoModelWithLMHead.from_config(config)` methods." 
) @classmethod def from_config(cls, config): if isinstance(config, DistilBertConfig): return DistilBertForMaskedLM(config) elif isinstance(config, RobertaConfig): return RobertaForMaskedLM(config) elif isinstance(config, BertConfig): return BertForMaskedLM(config) elif isinstance(config, OpenAIGPTConfig): return OpenAIGPTLMHeadModel(config) elif isinstance(config, GPT2Config): return GPT2LMHeadModel(config) elif isinstance(config, TransfoXLConfig): return TransfoXLLMHeadModel(config) elif isinstance(config, XLNetConfig): return XLNetLMHeadModel(config) elif isinstance(config, XLMConfig): return XLMWithLMHeadModel(config) elif isinstance(config, CTRLConfig): return CTRLLMHeadModel(config) elif isinstance(config, XLMRobertaConfig): return XLMRobertaForMaskedLM(config) raise ValueError("Unrecognized configuration class {}".format(config)) @classmethod def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs): if "t5" in pretrained_model_name_or_path: return T5WithLMHeadModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs) elif "distilbert" in pretrained_model_name_or_path: return DistilBertForMaskedLM.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs) elif "albert" in pretrained_model_name_or_path: return AlbertForMaskedLM.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs) elif "camembert" in pretrained_model_name_or_path: return CamembertForMaskedLM.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs) elif "xlm-roberta" in pretrained_model_name_or_path: return XLMRobertaForMaskedLM.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs) elif "roberta" in pretrained_model_name_or_path: return RobertaForMaskedLM.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs) elif "bert" in pretrained_model_name_or_path: return BertForMaskedLM.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs) elif "openai-gpt" in pretrained_model_name_or_path: return OpenAIGPTLMHeadModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs) elif "gpt2" in pretrained_model_name_or_path: return GPT2LMHeadModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs) elif "transfo-xl" in pretrained_model_name_or_path: return TransfoXLLMHeadModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs) elif "xlnet" in pretrained_model_name_or_path: return XLNetLMHeadModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs) elif "xlm" in pretrained_model_name_or_path: return XLMWithLMHeadModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs) elif "ctrl" in pretrained_model_name_or_path: return CTRLLMHeadModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs) raise ValueError( "Unrecognized model identifier in {}. Should contains one of " "'bert', 'openai-gpt', 'gpt2', 'transfo-xl', 'xlnet', " "'xlm-roberta', 'xlm', 'roberta','ctrl', 'distilbert', 'camembert', 'albert'".format( pretrained_model_name_or_path ) ) class AutoModelForSequenceClassification(object): def __init__(self): raise EnvironmentError( "AutoModelForSequenceClassification is designed to be instantiated " "using the `AutoModelForSequenceClassification.from_pretrained(pretrained_model_name_or_path)` or " "`AutoModelForSequenceClassification.from_config(config)` methods." ) @classmethod
Apache License 2.0
mlindauer/autofolio
autofolio/feature_preprocessing/standardscaler.py
StandardScalerWrapper.fit
python
def fit(self, scenario: ASlibScenario, config: Configuration): if config.get("StandardScaler"): self.active = True self.scaler = StandardScaler() self.scaler.fit(scenario.feature_data.values)
Fit a StandardScaler object to the ASlib scenario feature data; the scaler is only created and fitted when the "StandardScaler" hyperparameter is enabled in the configuration. Arguments --------- scenario: data.aslib_scenario.ASlibScenario ASlib scenario with all data in pandas config: ConfigSpace.Configuration configuration
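A short sketch of how `add_params` and `fit` work together, assuming an ASlibScenario has already been loaded elsewhere (the `scenario` variable is a placeholder) and that the module is importable under the repository layout shown in this record.

    from ConfigSpace.configuration_space import ConfigurationSpace
    from autofolio.feature_preprocessing.standardscaler import StandardScalerWrapper

    # Register the on/off switch for the scaler in a configuration space.
    cs = ConfigurationSpace()
    StandardScalerWrapper.add_params(cs)

    # Sample a configuration; fit() only activates the scaler when the
    # "StandardScaler" hyperparameter evaluates to True.
    config = cs.sample_configuration()

    scaler = StandardScalerWrapper()
    scaler.fit(scenario, config)  # `scenario`: a pre-loaded ASlibScenario (placeholder)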
https://github.com/mlindauer/autofolio/blob/f296f528b1b684d36837075b0e8160e3fa4124f7/autofolio/feature_preprocessing/standardscaler.py#L39-L54
import logging import numpy as np import pandas as pd from sklearn.preprocessing import StandardScaler from ConfigSpace.hyperparameters import CategoricalHyperparameter, UniformFloatHyperparameter, UniformIntegerHyperparameter from ConfigSpace.conditions import EqualsCondition, InCondition from ConfigSpace.configuration_space import ConfigurationSpace from ConfigSpace import Configuration from aslib_scenario.aslib_scenario import ASlibScenario __author__ = "Marius Lindauer" __license__ = "BSD" class StandardScalerWrapper(object): @staticmethod def add_params(cs: ConfigurationSpace): switch = CategoricalHyperparameter( "StandardScaler", choices=[True, False], default_value=True) cs.add_hyperparameter(switch) def __init__(self): self.scaler = None self.active = False self.logger = logging.getLogger("StandardScaler")
BSD 2-Clause Simplified License
capitalone/rubicon
rubicon_ml/repository/base.py
BaseRepository.create_artifact
python
def create_artifact(self, artifact, data, project_name, experiment_id=None): artifact_metadata_path = self._get_artifact_metadata_path( project_name, experiment_id, artifact.id ) artifact_data_path = self._get_artifact_data_path(project_name, experiment_id, artifact.id) self._persist_bytes(data, artifact_data_path) self._persist_domain(artifact, artifact_metadata_path)
Persist an artifact to the configured filesystem. Parameters ---------- artifact : rubicon.domain.Artifact The artifact to persist. data : bytes The raw data to persist as an artifact. project_name : str The name of the project this artifact belongs to. experiment_id : str, optional The ID of the experiment this artifact belongs to. Artifacts do not need to belong to an experiment.
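An illustrative sketch, not taken from the record: it assumes a concrete subclass backed by fsspec's in-memory filesystem (the `MemoryRepository` class is hypothetical) and an `artifact` domain object built elsewhere, since the Artifact constructor is not shown here.

    from rubicon_ml.repository.base import BaseRepository

    class MemoryRepository(BaseRepository):
        # Hypothetical subclass: BaseRepository builds its filesystem from
        # PROTOCOL, so "memory" keeps everything in process memory (testing).
        PROTOCOL = "memory"

    repository = MemoryRepository(root_dir="/rubicon-root")

    # `artifact` is assumed to be a rubicon_ml.domain.Artifact created elsewhere;
    # its id drives the metadata and data paths written below.
    repository.create_artifact(
        artifact,
        data=b"serialized-model-bytes",
        project_name="My Project",
    )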
https://github.com/capitalone/rubicon/blob/86278a98cf5fd0b7e179a2949fce5a12e42fd7be/rubicon_ml/repository/base.py#L247-L268
import os import warnings from pathlib import Path import fsspec import pandas as pd from dask import dataframe as dd from rubicon_ml import domain from rubicon_ml.exceptions import RubiconException from rubicon_ml.repository.utils import json, slugify class BaseRepository: def __init__(self, root_dir, **storage_options): self.filesystem = fsspec.filesystem(self.PROTOCOL, **storage_options) self.root_dir = root_dir.rstrip("/") def _ls_directories_only(self, path): directories = [ os.path.join(p.get("name"), "metadata.json") for p in self.filesystem.ls(path, detail=True) if p.get("type", p.get("StorageClass")).lower() == "directory" ] return directories def _cat_paths(self, metadata_paths): files = [] for path, metadata in self.filesystem.cat(metadata_paths, on_error="return").items(): if isinstance(metadata, FileNotFoundError): warning = f"{path} not found. Was this file unintentionally created?" warnings.warn(warning) else: files.append(metadata) return files def _get_project_metadata_path(self, project_name): return f"{self.root_dir}/{slugify(project_name)}/metadata.json" def create_project(self, project): project_metadata_path = self._get_project_metadata_path(project.name) if self.filesystem.exists(project_metadata_path): raise RubiconException(f"A project with name '{project.name}' already exists.") self._persist_domain(project, project_metadata_path) def get_project(self, project_name): project_metadata_path = self._get_project_metadata_path(project_name) try: project = json.loads(self.filesystem.cat(project_metadata_path)) except FileNotFoundError: raise RubiconException(f"No project with name '{project_name}' found.") return domain.Project(**project) def get_projects(self): try: project_metadata_paths = self._ls_directories_only(self.root_dir) projects = [ domain.Project(**json.loads(metadata)) for metadata in self._cat_paths(project_metadata_paths) ] projects.sort(key=lambda a: a.created_at) except FileNotFoundError: return [] return projects def _get_experiment_metadata_root(self, project_name): return f"{self.root_dir}/{slugify(project_name)}/experiments" def _get_experiment_metadata_path(self, project_name, experiment_id): experiment_metadata_root = self._get_experiment_metadata_root(project_name) return f"{experiment_metadata_root}/{experiment_id}/metadata.json" def create_experiment(self, experiment): experiment_metadata_path = self._get_experiment_metadata_path( experiment.project_name, experiment.id ) self._persist_domain(experiment, experiment_metadata_path) def get_experiment(self, project_name, experiment_id): experiment_metadata_path = self._get_experiment_metadata_path(project_name, experiment_id) try: open_file = self.filesystem.open(experiment_metadata_path) except FileNotFoundError: raise RubiconException(f"No experiment with id `{experiment_id}` found.") with open_file as f: experiment = json.load(f) return domain.Experiment(**experiment) def get_experiments(self, project_name): experiment_metadata_root = self._get_experiment_metadata_root(project_name) try: experiment_metadata_paths = self._ls_directories_only(experiment_metadata_root) experiments = [ domain.Experiment(**json.loads(metadata)) for metadata in self._cat_paths(experiment_metadata_paths) ] experiments.sort(key=lambda a: a.created_at) except FileNotFoundError: return [] return experiments def _get_artifact_metadata_root(self, project_name, experiment_id=None): if experiment_id is not None: experiment_metadata_root = self._get_experiment_metadata_root(project_name) return 
f"{experiment_metadata_root}/{experiment_id}/artifacts" else: return f"{self.root_dir}/{slugify(project_name)}/artifacts" def _get_artifact_metadata_path(self, project_name, experiment_id, artifact_id): artifact_metadata_root = self._get_artifact_metadata_root(project_name, experiment_id) return f"{artifact_metadata_root}/{artifact_id}/metadata.json" def _get_artifact_data_path(self, project_name, experiment_id, artifact_id): artifact_metadata_root = self._get_artifact_metadata_root(project_name, experiment_id) return f"{artifact_metadata_root}/{artifact_id}/data"
Apache License 2.0
cloud-bulldozer/benchmark-wrapper
snafu/fio_wrapper/fio_analyzer.py
Fio_Analyzer.add_fio_result_documents
python
def add_fio_result_documents(self, document_list, starttime): for document in document_list: fio_result = {} fio_result["document"] = document fio_result["starttime"] = starttime self.fio_processed_results_list.append(fio_result)
For each new document in the given list, add it to the processed results list together with its starttime.
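A small sketch of feeding processed fio documents into the analyzer; the document fields and timestamps are placeholders, and the import path follows the file layout shown in this record.

    import time

    from snafu.fio_wrapper.fio_analyzer import Fio_Analyzer

    analyzer = Fio_Analyzer(uuid="1234-abcd", user="ci-user", cluster_name="test-cluster")

    # Each call tags every document in the list with the sample's start time.
    docs = [{"jobname": "seq-write", "iops": 1500}, {"jobname": "rand-read", "iops": 2100}]
    analyzer.add_fio_result_documents(docs, starttime=time.time())

    print(len(analyzer.fio_processed_results_list))  # -> 2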
https://github.com/cloud-bulldozer/benchmark-wrapper/blob/032b0920397888bbae1a62ca27fae28d1be4537c/snafu/fio_wrapper/fio_analyzer.py#L23-L31
import statistics import time class Fio_Analyzer: def __init__(self, uuid, user, cluster_name): self.uuid = uuid self.user = user self.fio_processed_results_list = [] self.sample_list = [] self.operation_list = [] self.io_size_list = [] self.sumdoc = {} self.cluster_name = cluster_name
Apache License 2.0
chronicle/detection-api
detect/v2/delete_rule.py
delete_rule
python
def delete_rule(http_session: requests.AuthorizedSession, rule_id: str): url = f"{CHRONICLE_API_BASE_URL}/v2/detect/rules/{rule_id}" response = http_session.request("DELETE", url) if response.status_code >= 400: print(response.text) response.raise_for_status()
Delete a specific detection rule. Args: http_session: Authorized session for HTTP requests. rule_id: Unique ID of the detection rule to delete ("ru_<UUID>"). It does not accept version id format ("ru_<UUID>@v_<seconds>_<nanoseconds>"). Raises: requests.exceptions.HTTPError: HTTP request resulted in an error (response.status_code >= 400).
https://github.com/chronicle/detection-api/blob/f0ec1f837c0c1e6af68d003dcd6c5774e524bba5/detect/v2/delete_rule.py#L33-L53
import argparse from google.auth.transport import requests from common import chronicle_auth from common import regions CHRONICLE_API_BASE_URL = "https://backstory.googleapis.com"
Apache License 2.0
intel/openfl
openfl/cryptography/participant.py
generate_csr
python
def generate_csr(common_name, server=False): private_key = rsa.generate_private_key( public_exponent=65537, key_size=3072, backend=default_backend() ) builder = x509.CertificateSigningRequestBuilder() subject = x509.Name([ x509.NameAttribute(NameOID.COMMON_NAME, common_name), ]) builder = builder.subject_name(subject) builder = builder.add_extension( x509.BasicConstraints(ca=False, path_length=None), critical=True, ) if server: builder = builder.add_extension( x509.ExtendedKeyUsage([x509.ExtendedKeyUsageOID.SERVER_AUTH]), critical=True ) else: builder = builder.add_extension( x509.ExtendedKeyUsage([x509.ExtendedKeyUsageOID.CLIENT_AUTH]), critical=True ) builder = builder.add_extension( x509.KeyUsage( digital_signature=True, key_encipherment=True, data_encipherment=False, key_agreement=False, content_commitment=False, key_cert_sign=False, crl_sign=False, encipher_only=False, decipher_only=False ), critical=True ) builder = builder.add_extension( x509.SubjectAlternativeName([x509.DNSName(common_name)]), critical=False ) csr = builder.sign( private_key=private_key, algorithm=hashes.SHA384(), backend=default_backend() ) return private_key, csr
Generate a private key and issue a certificate signing request for a server or client participant.
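A short sketch serializing the returned key and CSR to PEM with the standard `cryptography` APIs; the common name and output file names are placeholders.

    from cryptography.hazmat.primitives import serialization

    from openfl.cryptography.participant import generate_csr

    # Client-style CSR for a collaborator (server=True would request SERVER_AUTH).
    private_key, csr = generate_csr("collaborator1", server=False)

    # Serialize both objects to PEM for storage or submission to the CA.
    key_pem = private_key.private_bytes(
        encoding=serialization.Encoding.PEM,
        format=serialization.PrivateFormat.PKCS8,
        encryption_algorithm=serialization.NoEncryption(),
    )
    csr_pem = csr.public_bytes(serialization.Encoding.PEM)

    with open("collaborator1.key", "wb") as f:
        f.write(key_pem)
    with open("collaborator1.csr", "wb") as f:
        f.write(csr_pem)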
https://github.com/intel/openfl/blob/4bda3850b6bce7c904a5ac3ed56115bec00be2e0/openfl/cryptography/participant.py#L13-L68
from cryptography import x509 from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives import hashes from cryptography.hazmat.primitives.asymmetric import rsa from cryptography.x509.oid import NameOID
Apache License 2.0
jgraving/deepposekit
deepposekit/models/LEAP.py
LEAP.__init__
python
def __init__( self, train_generator, filters=64, upsampling=False, activation="relu", batchnorm=False, use_bias=True, pooling="max", interpolation="bilinear", subpixel=False, initializer="glorot_uniform", **kwargs ): self.filters = filters self.upsampling = upsampling self.activation = activation if activation == "selu": batchnorm = False use_bias = False if batchnorm: use_bias = False self.batchnorm = batchnorm self.use_bias = use_bias self.pooling = pooling self.interpolation = interpolation self.subpixel = subpixel self.initializer = initializer super(LEAP, self).__init__(train_generator, subpixel, **kwargs)
Define a LEAP model from Pereira et al., 2018 [1] See `References` for details on the model architecture. Parameters ---------- train_generator : class deepposekit.io.TrainingGenerator A deepposekit.io.TrainingGenerator class for generating images and confidence maps. filters : int, default = 64 The base number of channels to output from each convolutional layer. Increases by up to a factor of 4 with network depth. upsampling: bool, default = False Whether to use upsampling or transposed convolutions for upsampling layers. Default is False, which uses transposed convolutions. activation: str or callable, default = 'relu' The activation function to use for each convolutional layer. batchnorm : bool, default = False Whether to use batch normalization in each convolutional block. If activation is 'selu' then batchnorm is automatically set to False, as the network is already self-normalizing. pooling: str, default = 'max' The type of pooling to use during downsampling. Must be either 'max' or 'average'. interpolation: str, default = 'bilinear' The type of interpolation to use when upsampling. Must be 'nearest', 'bilinear', or 'bicubic'; 'nearest' is the most efficient. subpixel: bool, default = False Whether to use subpixel maxima for calculating keypoint coordinates in the prediction model. initializer: str or callable, default='glorot_uniform' The initializer for the convolutional kernels. Default is 'glorot_uniform' which is the keras default. If activation is 'selu', the initializer is automatically changed to 'lecun_normal', which is the recommended initializer for that activation function [4]. Attributes ------- train_model: keras.Model A model for training the network to produce confidence maps with one input layer for images predict_model: keras.Model A model for predicting keypoint coordinates using Maxima2D or SubpixelMaxima2D layers at the output of the network. Both of these models share the same computational graph, so training train_model updates the weights of predict_model References ---------- 1. Pereira, T. D., Aldarondo, D. E., Willmore, L., Kislin, M., Wang, S. S. H., Murthy, M., & Shaevitz, J. W. (2019). Fast animal pose estimation using deep neural networks. Nature methods, 16(1), 117.
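An illustrative construction sketch, assuming `train_generator` is a deepposekit.io.TrainingGenerator built elsewhere from an annotated dataset (its construction is not part of this record).

    from deepposekit.models import LEAP

    # `train_generator` is assumed to be a deepposekit.io.TrainingGenerator
    # prepared elsewhere from annotated images and keypoints.
    model = LEAP(
        train_generator,
        filters=64,          # base number of channels per convolutional layer
        upsampling=False,    # transposed convolutions for the upsampling layers
        activation="relu",
        pooling="max",
        subpixel=True,       # subpixel maxima in the prediction model
    )

    # model.train_model is fit to produce confidence maps; model.predict_model
    # shares the same weights and outputs keypoint coordinates.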
https://github.com/jgraving/deepposekit/blob/cecdb0c8c364ea049a3b705275ae71a2f366d4da/deepposekit/models/LEAP.py#L26-L115
from tensorflow.keras import Input, Model from tensorflow.keras.layers import Conv2D, Conv2DTranspose, BatchNormalization from deepposekit.models.layers.convolutional import UpSampling2D from deepposekit.models.layers.util import ImageNormalization from deepposekit.models.layers.leap import ConvBlock2D, ConvPool2D from deepposekit.models.engine import BaseModel class LEAP(BaseModel):
Apache License 2.0
halit/isip
isip/scapy/contrib/gsm_um.py
systemInformationType2
python
def systemInformationType2(): a = L2PseudoLength(l2pLength=0x16) b = TpPd(pd=0x6) c = MessageType(mesType=0x1a) d = NeighbourCellsDescription() e = NccPermitted() f = RachControlParameters() packet = a / b / c / d / e / f return packet
SYSTEM INFORMATION TYPE 2 Section 9.1.32
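A brief sketch, in the spirit of this module's examples() helper, of building the message and inspecting or transmitting it; it assumes the contrib module is importable and that the local socket server expected by sendum() is already running.

    from scapy.all import hexdump
    from scapy.contrib.gsm_um import sendum, systemInformationType2

    # Build a SYSTEM INFORMATION TYPE 2 message (section 9.1.32) with the default
    # neighbour-cell description, NCC-permitted and RACH control parameters.
    packet = systemInformationType2()
    hexdump(packet)

    # Hand the raw message to the local Um socket server (typeSock=0 targets the
    # UDP listener on 127.0.0.1:28670 described in sendum()).
    sendum(packet, typeSock=0)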
https://github.com/halit/isip/blob/fad1f10b02f9e075451588cc6a18a46cc5fbd66b/isip/scapy/contrib/gsm_um.py#L997-L1006
import logging from types import IntType from types import NoneType from types import StringType import socket logging.getLogger("scapy").setLevel(1) from scapy.all import * def sendum(x, typeSock=0): try: if type(x) is not str: x = str(x) if typeSock is 0: s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) host = '127.0.0.1' port = 28670 s.connect((host, port)) elif typeSock is 1: s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) s.connect("/tmp/osmoL") elif typeSock is 2: s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) host = '127.0.0.1' port = 43797 s.connect((host, port)) s.send(x) s.close() except: print "[Error]: There was a problem when trying to transmit data.\ Please make sure you started the socket server." class ErrorLength(Exception): def __str__(self): error = "ERROR: Please make sure you build entire, 8 bit fields." return repr(error) def adapt(min_length, max_length, fields, fields2, location=2): location = min_length - location i = len(fields) - 1 rm = mysum = 0 while i >= 0: if fields[i] is None: rm += 1 try: mysum += fields2[i].size except AttributeError: mysum += 8 else: break i -= 1 if mysum % 8 is 0: length = mysum / 8 dyn_length = (max_length - min_length - length) if dyn_length < 0: dyn_length = 0 if length is max_length: length -= min_length return [length, dyn_length + location] else: raise ErrorLength() def examples(example=None): if example == None: print """This command presents some example to introduce scapy gsm-um to new users. The following parameters can be used: examples("imsiDetach") examples("call") examples("dissect")""" elif example == "imsiDetach": print """ >>> a=imsiDetachIndication() ... a.typeOfId=1; a.odd=1; a.idDigit1=0xF; ... a.idDigit2_1=2; a.idDigit2=7; a.idDigit3_1=0; ... a.idDigit3=7; a.idDigit4_1=7; a.idDigit4=2; ... a.idDigit5_1=0; a.idDigit5=0; a.idDigit6_1=0; ... a.idDigit6=1; a.idDigit7_1=2; a.idDigit7=7; ... a.idDigit8_1=7; a.idDigit8=5; a.idDigit9_1=1; a.idDigit9=4; >>> hexdump(a) 0000 05 01 00 08 F0 27 07 72 00 01 27 75 14 .....'.r..'u. 
>>> sendum(a) """ elif example == "call": print """ If you use an USRP and the testcall function this sets up a phonecall: >>> sendum(setupMobileOriginated()) >>> sendum(connectAcknowledge()) """ class TpPd(Packet): name = "Skip Indicator And Transaction Identifier and Protocol \ Discriminator" fields_desc = [ BitField("ti", 0x0, 4), BitField("pd", 0x3, 4) ] class MessageType(Packet): name = "Message Type" fields_desc = [ XByteField("mesType", 0x3C) ] def additionalAssignment(MobileAllocation_presence=0, StartingTime_presence=0): a = TpPd(pd=0x6) b = MessageType(mesType=0x3B) c = ChannelDescription() packet = a / b / c if MobileAllocation_presence is 1: d = MobileAllocationHdr(ieiMA=0x72, eightBitMA=0x0) packet = packet / d if StartingTime_presence is 1: e = StartingTimeHdr(ieiST=0x7C, eightBitST=0x0) packet = packet / e return packet def assignmentCommand(FrequencyList_presence=0, CellChannelDescription_presence=0, CellChannelDescription_presence1=0, MultislotAllocation_presence=0, ChannelMode_presence=0, ChannelMode_presence1=0, ChannelMode_presence2=0, ChannelMode_presence3=0, ChannelMode_presence4=0, ChannelMode_presence5=0, ChannelMode_presence6=0, ChannelMode_presence7=0, ChannelDescription=0, ChannelMode2_presence=0, MobileAllocation_presence=0, StartingTime_presence=0, FrequencyList_presence1=0, ChannelDescription2_presence=0, ChannelDescription_presence=0, FrequencyChannelSequence_presence=0, MobileAllocation_presence1=0, CipherModeSetting_presence=0, VgcsTargetModeIdentication_presence=0, MultiRateConfiguration_presence=0): a = TpPd(pd=0x6) b = MessageType(mesType=0x2e) c = ChannelDescription2() d = PowerCommand() packet = a / b / c / d if FrequencyList_presence is 1: e = FrequencyListHdr(ieiFL=0x05, eightBitFL=0x0) packet = packet / e if CellChannelDescription_presence is 1: f = CellChannelDescriptionHdr(ieiCCD=0x62, eightBitCCD=0x0) packet = packet / f if MultislotAllocation_presence is 1: g = MultislotAllocationHdr(ieiMSA=0x10, eightBitMSA=0x0) packet = packet / g if ChannelMode_presence is 1: h = ChannelModeHdr(ieiCM=0x63, eightBitCM=0x0) packet = packet / h if ChannelMode_presence1 is 1: i = ChannelModeHdr(ieiCM=0x11, eightBitCM=0x0) packet = packet / i if ChannelMode_presence2 is 1: j = ChannelModeHdr(ieiCM=0x13, eightBitCM=0x0) packet = packet / j if ChannelMode_presence3 is 1: k = ChannelModeHdr(ieiCM=0x14, eightBitCM=0x0) packet = packet / k if ChannelMode_presence4 is 1: l = ChannelModeHdr(ieiCM=0x15, eightBitCM=0x0) packet = packet / l if ChannelMode_presence5 is 1: m = ChannelModeHdr(ieiCM=0x16, eightBitCM=0x0) packet = packet / m if ChannelMode_presence6 is 1: n = ChannelModeHdr(ieiCM=0x17, eightBitCM=0x0) packet = packet / n if ChannelMode_presence7 is 1: o = ChannelModeHdr(ieiCM=0x18, eightBitCM=0x0) packet = packet / o if ChannelDescription_presence is 1: p = ChannelDescriptionHdr(ieiCD=0x64, eightBitCD=0x0) packet = packet / p if ChannelMode2_presence is 1: q = ChannelMode2Hdr(ieiCM2=0x66, eightBitCM2=0x0) packet = packet / q if MobileAllocation_presence is 1: r = MobileAllocationHdr(ieiMA=0x72, eightBitMA=0x0) packet = packet / r if StartingTime_presence is 1: s = StartingTimeHdr(ieiST=0x7C, eightBitST=0x0) packet = packet / s if FrequencyList_presence1 is 1: t = FrequencyListHdr(ieiFL=0x19, eightBitFL=0x0) packet = packet / t if ChannelDescription2_presence is 1: u = ChannelDescription2Hdr(ieiCD2=0x1C, eightBitCD2=0x0) packet = packet / u if ChannelDescription_presence is 1: v = ChannelDescriptionHdr(ieiCD=0x1D, eightBitCD=0x0) packet = packet / v if 
FrequencyChannelSequence_presence is 1: w = FrequencyChannelSequenceHdr(ieiFCS=0x1E, eightBitFCS=0x0) packet = packet / w if MobileAllocation_presence1 is 1: x = MobileAllocationHdr(ieiMA=0x21, eightBitMA=0x0) packet = packet / x if CipherModeSetting_presence is 1: y = CipherModeSettingHdr(ieiCMS=0x9, eightBitCMS=0x0) packet = packet / y if VgcsTargetModeIdentication_presence is 1: z = VgcsTargetModeIdenticationHdr(ieiVTMI=0x01, eightBitVTMI=0x0) packet = packet / z if MultiRateConfiguration_presence is 1: aa = MultiRateConfigurationHdr(ieiMRC=0x03, eightBitMRC=0x0) packet = packet / aa return packet def assignmentComplete(): a = TpPd(pd=0x6) b = MessageType(mesType=0x29) c = RrCause() packet = a / b / c return packet def assignmentFailure(): a = TpPd(pd=0x6) b = MessageType(mesType=0x2F) c = RrCause() packet = a / b / c return packet def channelModeModify(VgcsTargetModeIdentication_presence=0, MultiRateConfiguration_presence=0): a = TpPd(pd=0x6) b = MessageType(mesType=0x8) c = ChannelDescription2() d = ChannelMode() packet = a / b / c / d if VgcsTargetModeIdentication is 1: e = VgcsTargetModeIdenticationHdr(ieiVTMI=0x01, eightBitVTMI=0x0) packet = packet / e if MultiRateConfiguration is 1: f = MultiRateConfigurationHdr(ieiMRC=0x03, eightBitMRC=0x0) packet = packet / f return packet def channelModeModifyAcknowledge(): a = TpPd(pd=0x6) b = MessageType(mesType=0x17) c = ChannelDescription2() d = ChannelMode() packet = a / b / c / d return packet def channelRelease(BaRange_presence=0, GroupChannelDescription_presence=0, GroupCipherKeyNumber_presence=0, GprsResumption_presence=0, BaListPref_presence=0): a = TpPd(pd=0x6) b = MessageType(mesType=0xD) c = RrCause() packet = a / b / c if BaRange_presence is 1: d = BaRangeHdr(ieiBR=0x73, eightBitBR=0x0) packet = packet / d if GroupChannelDescription_presence is 1: e = GroupChannelDescriptionHdr(ieiGCD=0x74, eightBitGCD=0x0) packet = packet / e if GroupCipherKeyNumber_presence is 1: f = GroupCipherKeyNumber(ieiGCKN=0x8) packet = packet / f if GprsResumption_presence is 1: g = GprsResumptionHdr(ieiGR=0xC, eightBitGR=0x0) packet = packet / g if BaListPref_presence is 1: h = BaListPrefHdr(ieiBLP=0x75, eightBitBLP=0x0) packet = packet / h return packet class ChannelRequest(Packet): name = "Channel Request" fields_desc = [ ByteField("estCause", 0x0) ] def channelRequest(): return ChannelRequest() def cipheringModeCommand(): a = TpPd(pd=0x6) b = MessageType(mesType=0x35) c = RrCause() d = CipherModeSettingAndcipherResponse() packet = a / b / c / d return packet def cipheringModeComplete(MobileId_presence=0): a = TpPd(pd=0x6) b = MessageType(mesType=0x32) packet = a / b if MobileId_presence is 1: c = MobileIdHdr(ieiMI=0x17, eightBitMI=0x0) packet = packet / c return packet def classmarkChange(MobileStationClassmark3_presence=0): a = TpPd(pd=0x6) b = MessageType(mesType=0x16) c = MobileStationClassmark2() packet = a / b / c if MobileStationClassmark3_presence is 1: e = MobileStationClassmark3(ieiMSC3=0x20) packet = packet / e return packet def classmarkEnquiry(): a = TpPd(pd=0x6) b = MessageType(mesType=0x13) packet = a / b return packet def configurationChangeCommand(ChannelMode_presence=0, ChannelMode_presence1=0, ChannelMode_presence2=0, ChannelMode_presence3=0, ChannelMode_presence4=0, ChannelMode_presence5=0, ChannelMode_presence6=0, ChannelMode_presence7=0): a = TpPd(pd=0x6) b = MessageType(mesType=0x30) c = MultislotAllocation() packet = a / b / c if ChannelMode_presence is 1: d = ChannelModeHdr(ieiCM=0x63, eightBitCM=0x0) packet = packet / d if 
ChannelMode_presence1 is 1: e = ChannelModeHdr(ieiCM=0x11, eightBitCM=0x0) packet = packet / e if ChannelMode_presence2 is 1: f = ChannelModeHdr(ieiCM=0x13, eightBitCM=0x0) packet = packet / f if ChannelMode_presence3 is 1: g = ChannelModeHdr(ieiCM=0x14, eightBitCM=0x0) packet = packet / g if ChannelMode_presence4 is 1: h = ChannelModeHdr(ieiCM=0x15, eightBitCM=0x0) packet = packet / h if ChannelMode_presence5 is 1: i = ChannelModeHdr(ieiCM=0x16, eightBitCM=0x0) packet = packet / i if ChannelMode_presence6 is 1: j = ChannelModeHdr(ieiCM=0x17, eightBitCM=0x0) packet = packet / j if ChannelMode_presence7 is 1: k = ChannelModeHdr(ieiCM=0x18, eightBitCM=0x0) packet = packet / k return packet def configurationChangeAcknowledge(): a = TpPd(pd=0x6) b = MessageType(mesType=0x31) c = MobileId() packet = a / b / c return packet def configurationChangeReject(): a = TpPd(pd=0x6) b = MessageType(mesType=0x33) c = RrCause() packet = a / b / c return packet def frequencyRedefinition(CellChannelDescription_presence=0): a = TpPd(pd=0x6) b = MessageType(mesType=0x14) c = ChannelDescription() d = MobileAllocation() e = StartingTime() packet = a / b / c / d / e if CellChannelDescription_presence is 1: f = CellChannelDescriptionHdr(ieiCCD=0x62, eightBitCCD=0x0) packet = packet / f return packet def pdchAssignmentCommand(ChannelDescription_presence=0, CellChannelDescription_presence=0, MobileAllocation_presence=0, StartingTime_presence=0, FrequencyList_presence=0, ChannelDescription_presence1=0, FrequencyChannelSequence_presence=0, MobileAllocation_presence1=0, PacketChannelDescription_presence=0, DedicatedModeOrTBF_presence=0): a = TpPd(pd=0x6) b = MessageType(mesType=0x23) c = ChannelDescription() packet = a / b / c if ChannelDescription_presence is 1: d = ChannelDescriptionHdr(ieiCD=0x62, eightBitCD=0x0) packet = packet / d if CellChannelDescription_presence is 1: e = CellChannelDescriptionHdr(ieiCCD=0x05, eightBitCCD=0x0) packet = packet / e if MobileAllocation_presence is 1: f = MobileAllocationHdr(ieiMA=0x72, eightBitMA=0x0) packet = packet / f if StartingTime_presence is 1: g = StartingTimeHdr(ieiST=0x7C, eightBitST=0x0) packet = packet / g if FrequencyList_presence is 1: h = FrequencyListHdr(ieiFL=0x19, eightBitFL=0x0) packet = packet / h if ChannelDescription_presence1 is 1: i = ChannelDescriptionHdr(ieiCD=0x1C, eightBitCD=0x0) packet = packet / i if FrequencyChannelSequence_presence is 1: j = FrequencyChannelSequenceHdr(ieiFCS=0x1E, eightBitFCS=0x0) packet = packet / j if MobileAllocation_presence1 is 1: k = MobileAllocationHdr(ieiMA=0x21, eightBitMA=0x0) packet = packet / k if PacketChannelDescription_presence is 1: l = PacketChannelDescription(ieiPCD=0x22) packet = packet / l if DedicatedModeOrTBF_presence is 1: m = DedicatedModeOrTBFHdr(ieiDMOT=0x23, eightBitDMOT=0x0) packet = packet / m return packet def gprsSuspensionRequest(): a = TpPd(pd=0x6) b = MessageType() c = Tlli() d = RoutingAreaIdentification() e = SuspensionCause() packet = a / b / c / d / e return packet class HandoverAccess(Packet): name = "Handover Access" fields_desc = [ ByteField("handover", None), ] def handoverCommand(SynchronizationIndication_presence=0, FrequencyShortList_presence=0, FrequencyList_presence=0, CellChannelDescription_presence=0, MultislotAllocation_presence=0, ChannelMode_presence=0, ChannelMode_presence1=0, ChannelMode_presence2=0, ChannelMode_presence3=0, ChannelMode_presence4=0, ChannelMode_presence5=0, ChannelMode_presence6=0, ChannelMode_presence7=0, ChannelDescription_presence1=0, ChannelMode2_presence=0, 
FrequencyChannelSequence_presence=0, MobileAllocation_presence=0, StartingTime_presence=0, TimeDifference_presence=0, TimingAdvance_presence=0, FrequencyShortList_presence1=0, FrequencyList_presence1=0, ChannelDescription2_presence=0, ChannelDescription_presence2=0, FrequencyChannelSequence_presence1=0, MobileAllocation_presence1=0, CipherModeSetting_presence=0, VgcsTargetModeIdentication_presence=0, MultiRateConfiguration_presence=0): name = "Handover Command" a = TpPd(pd=0x6) b = MessageType(mesType=0x2b) c = CellDescription() d = ChannelDescription2() e = HandoverReference() f = PowerCommandAndAccessType() packet = a / b / c / d / e / f if SynchronizationIndication_presence is 1: g = SynchronizationIndicationHdr(ieiSI=0xD, eightBitSI=0x0) packet = packet / g if FrequencyShortList_presence is 1: h = FrequencyShortListHdr(ieiFSL=0x02) packet = packet / h if FrequencyList_presence is 1: i = FrequencyListHdr(ieiFL=0x05, eightBitFL=0x0) packet = packet / i if CellChannelDescription_presence is 1: j = CellChannelDescriptionHdr(ieiCCD=0x62, eightBitCCD=0x0) packet = packet / j if MultislotAllocation_presence is 1: k = MultislotAllocationHdr(ieiMSA=0x10, eightBitMSA=0x0) packet = packet / k if ChannelMode_presence is 1: l = ChannelModeHdr(ieiCM=0x63, eightBitCM=0x0) packet = packet / l if ChannelMode_presence1 is 1: m = ChannelModeHdr(ieiCM=0x11, eightBitCM=0x0) packet = packet / m if ChannelMode_presence2 is 1: n = ChannelModeHdr(ieiCM=0x13, eightBitCM=0x0) packet = packet / n if ChannelMode_presence3 is 1: o = ChannelModeHdr(ieiCM=0x14, eightBitCM=0x0) packet = packet / o if ChannelMode_presence4 is 1: p = ChannelModeHdr(ieiCM=0x15, eightBitCM=0x0) packet = packet / p if ChannelMode_presence5 is 1: q = ChannelModeHdr(ieiCM=0x16, eightBitCM=0x0) packet = packet / q if ChannelMode_presence6 is 1: r = ChannelModeHdr(ieiCM=0x17, eightBitCM=0x0) packet = packet / r if ChannelMode_presence7 is 1: s = ChannelModeHdr(ieiCM=0x18, eightBitCM=0x0) packet = packet / s if ChannelDescription_presence1 is 1: s1 = ChannelDescriptionHdr(ieiCD=0x64, eightBitCD=0x0) packet = packet / s1 if ChannelMode2_presence is 1: t = ChannelMode2Hdr(ieiCM2=0x66, eightBitCM2=0x0) packet = packet / t if FrequencyChannelSequence_presence is 1: u = FrequencyChannelSequenceHdr(ieiFCS=0x69, eightBitFCS=0x0) packet = packet / u if MobileAllocation_presence is 1: v = MobileAllocationHdr(ieiMA=0x72, eightBitMA=0x0) packet = packet / v if StartingTime_presence is 1: w = StartingTimeHdr(ieiST=0x7C, eightBitST=0x0) packet = packet / w if TimeDifference_presence is 1: x = TimeDifferenceHdr(ieiTD=0x7B, eightBitTD=0x0) packet = packet / x if TimingAdvance_presence is 1: y = TimingAdvanceHdr(ieiTA=0x7D, eightBitTA=0x0) packet = packet / y if FrequencyShortList_presence1 is 1: z = FrequencyShortListHdr(ieiFSL=0x12) packet = packet / z if FrequencyList_presence1 is 1: aa = FrequencyListHdr(ieiFL=0x19, eightBitFL=0x0) packet = packet / aa if ChannelDescription2_presence is 1: ab = ChannelDescription2Hdr(ieiCD2=0x1C, eightBitCD2=0x0) packet = packet / ab if ChannelDescription_presence2 is 1: ac = ChannelDescriptionHdr(ieiCD=0x1D, eightBitCD=0x0) packet = packet / ac if FrequencyChannelSequence_presence1 is 1: ad = FrequencyChannelSequenceHdr(ieiFCS=0x1E, eightBitFCS=0x0) packet = packet / ad if MobileAllocation_presence1 is 1: ae = MobileAllocationHdr(ieiMA=0x21, eightBitMA=0x0) packet = packet / ae if CipherModeSetting_presence is 1: af = CipherModeSettingHdr(ieiCMS=0x9, eightBitCMS=0x0) packet = packet / af if 
VgcsTargetModeIdentication_presence is 1: ag = VgcsTargetModeIdenticationHdr(ieiVTMI=0x01, eightBitVTMI=0x0) packet = packet / ag if MultiRateConfiguration_presence is 1: ah = MultiRateConfigurationHdr(ieiMRC=0x03, eightBitMRC=0x0) packet = packet / ah return packet def handoverComplete(MobileTimeDifference_presence=0): a = TpPd(pd=0x6) b = MessageType(mesType=0x2c) c = RrCause() packet = a / b / c if MobileTimeDifference_presence is 1: d = MobileTimeDifferenceHdr(ieiMTD=0x77, eightBitMTD=0x0) packet = packet / d return packet def handoverFailure(): a = TpPd(pd=0x6) b = MessageType(mesType=0x28) c = RrCause() packet = a / b / c return packet def immediateAssignment(ChannelDescription_presence=0, PacketChannelDescription_presence=0, StartingTime_presence=0): a = L2PseudoLength() b = TpPd(pd=0x6) c = MessageType(mesType=0x3F) d = PageModeAndDedicatedModeOrTBF() packet = a / b / c / d if ChannelDescription_presence is 1: f = ChannelDescription() packet = packet / f if PacketChannelDescription_presence is 1: g = PacketChannelDescription() packet = packet / g h = RequestReference() i = TimingAdvance() j = MobileAllocation() packet = packet / h / i / j if StartingTime_presence is 1: k = StartingTimeHdr(ieiST=0x7C, eightBitST=0x0) packet = packet / k l = IaRestOctets() packet = packet / l return packet def immediateAssignmentExtended(StartingTime_presence=0): a = L2PseudoLength() b = TpPd(pd=0x6) c = MessageType(mesType=0x39) d = PageModeAndSpareHalfOctets() f = ChannelDescription() g = RequestReference() h = TimingAdvance() i = MobileAllocation() packet = a / b / c / d / f / g / h / i if StartingTime_presence is 1: j = StartingTimeHdr(ieiST=0x7C, eightBitST=0x0) packet = packet / j k = IaxRestOctets() packet = packet / k return packet def immediateAssignmentReject(): a = L2PseudoLength(l2pLength=0x13) b = TpPd(pd=0x6) c = MessageType(mesType=0x3a) d = PageModeAndSpareHalfOctets() f = RequestReference() g = WaitIndication() h = RequestReference() i = WaitIndication() j = RequestReference() k = WaitIndication() l = RequestReference() m = WaitIndication() n = IraRestOctets() packet = a / b / c / d / f / g / h / i / j / k / l / m / n return packet def measurementReport(): a = TpPd(pd=0x6) b = MessageType(mesType=0x15) c = MeasurementResults() packet = a / b / c return packet class NotificationFacch(): name = "Notification/facch" fields_desc = [ BitField("rr", 0x0, 1), BitField("msgTyoe", 0x0, 5), BitField("layer2Header", 0x0, 2), BitField("frChanDes", 0x0, 24) ] def notificationNch(): a = L2PseudoLength(l2pLength=0x01) b = TpPd(pd=0x6) c = MessageType(mesType=0x20) d = NtNRestOctets() packet = a / b / c / d return packet def notificationResponse(): a = TpPd(pd=0x6) b = MessageType(mesType=0x26) c = MobileStationClassmark2() d = MobileId() e = DescriptiveGroupOrBroadcastCallReference() packet = a / b / c / d / e return packet def rrCellChangeOrder(): a = TpPd(pd=0x6) b = MessageType(mesType=0x8) c = CellDescription() d = NcModeAndSpareHalfOctets() packet = a / b / c / d return packet def pagingRequestType1(MobileId_presence=0): a = L2PseudoLength() b = TpPd(pd=0x6) c = MessageType(mesType=0x21) d = PageModeAndChannelNeeded() f = MobileId() packet = a / b / c / d / f if MobileId_presence is 1: g = MobileIdHdr(ieiMI=0x17, eightBitMI=0x0) packet = packet / g h = P1RestOctets() packet = packet / h return packet def pagingRequestType2(MobileId_presence=0): a = L2PseudoLength() b = TpPd(pd=0x6) c = MessageType(mesType=0x22) d = PageModeAndChannelNeeded() f = MobileId() g = MobileId() packet = a / b / c / d 
/ f / g if MobileId_presence is 1: h = MobileIdHdr(ieiMI=0x17, eightBitMI=0x0) packet = packet / h i = P2RestOctets() packet = packet / i return packet def pagingRequestType3(): a = L2PseudoLength(l2pLength=0x13) b = TpPd(pd=0x6) c = MessageType(mesType=0x24) d = PageModeAndChannelNeeded() e = TmsiPTmsi() f = TmsiPTmsi() g = TmsiPTmsi() h = TmsiPTmsi() i = P3RestOctets() packet = a / b / c / d / e / f / g / h / i return packet def pagingResponse(): a = TpPd(pd=0x6) b = MessageType(mesType=0x27) c = CiphKeySeqNrAndSpareHalfOctets() d = MobileStationClassmark2() e = MobileId() packet = a / b / c / d / e return packet def partialRelease(): a = TpPd(pd=0x6) b = MessageType(mesType=0xa) c = ChannelDescription() packet = a / b / c return packet def partialReleaseComplete(): a = TpPd(pd=0x6) b = MessageType(mesType=0xf) packet = a / b return packet def physicalInformation(): a = TpPd(pd=0x6) b = MessageType(mesType=0x2d) c = TimingAdvance() packet = a / b / c return packet def rrInitialisationRequest(): a = TpPd(pd=0x6) b = MessageType(mesType=0x3c) c = CiphKeySeqNrAndMacModeAndChannelCodingRequest() e = MobileStationClassmark2() f = Tlli() g = ChannelRequestDescription() h = GprsMeasurementResults() packet = a / b / c / e / f / g / h return packet def rrStatus(): a = TpPd(pd=0x6) b = MessageType(mesType=0x12) c = RrCause() packet = a / b / c return packet class SynchronizationChannelInformation(): name = "Synchronization Channel Information" fields_desc = [ BitField("bsic", 0x0, 5), BitField("t1Hi", 0x0, 3), ByteField("t1Mi", 0x0), BitField("t1Lo", 0x0, 1), BitField("t2", 0x0, 5), BitField("t3Hi", 0x0, 2), BitField("t3Lo", 0x0, 1) ] def systemInformationType1(): a = L2PseudoLength(l2pLength=0x15) b = TpPd(pd=0x6) c = MessageType(mesType=0x19) d = CellChannelDescription() e = RachControlParameters() f = Si1RestOctets() packet = a / b / c / d / e / f return packet
MIT License
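The builder functions in the context above assemble GSM RR messages by stacking Scapy-style layers with the `/` operator and gating optional information elements on `*_presence` flags. A minimal usage sketch, assuming the module that defines these builders can be imported as `gsm_um` (that module name is a placeholder inferred from the surrounding code, not confirmed by this record):

# The import path "gsm_um" is an assumption; substitute the module that actually holds the builders.
from gsm_um import rrStatus, pagingRequestType1

status_msg = rrStatus()                              # TpPd / MessageType / RrCause stack
page_req = pagingRequestType1(MobileId_presence=1)   # optional MobileIdHdr IE appended
print(bytes(status_msg).hex())                       # serialized message octets
print(bytes(page_req).hex())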
sergioteula/python-amazon-paapi
amazon/paapi5_python_sdk/offer_listing.py
OfferListing.price
python
def price(self): return self._price
Gets the price of this OfferListing. # noqa: E501 :return: The price of this OfferListing. # noqa: E501 :rtype: OfferPrice
https://github.com/sergioteula/python-amazon-paapi/blob/9cb744bef17f5127231367430191df12126e9c24/amazon/paapi5_python_sdk/offer_listing.py#L273-L280
import pprint import re import six from .offer_availability import OfferAvailability from .offer_condition import OfferCondition from .offer_delivery_info import OfferDeliveryInfo from .offer_loyalty_points import OfferLoyaltyPoints from .offer_merchant_info import OfferMerchantInfo from .offer_price import OfferPrice from .offer_program_eligibility import OfferProgramEligibility from .offer_promotion import OfferPromotion class OfferListing(object): """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'availability': 'OfferAvailability', 'condition': 'OfferCondition', 'delivery_info': 'OfferDeliveryInfo', 'id': 'str', 'is_buy_box_winner': 'bool', 'loyalty_points': 'OfferLoyaltyPoints', 'merchant_info': 'OfferMerchantInfo', 'price': 'OfferPrice', 'program_eligibility': 'OfferProgramEligibility', 'promotions': 'list[OfferPromotion]', 'saving_basis': 'OfferPrice', 'violates_map': 'bool' } attribute_map = { 'availability': 'Availability', 'condition': 'Condition', 'delivery_info': 'DeliveryInfo', 'id': 'Id', 'is_buy_box_winner': 'IsBuyBoxWinner', 'loyalty_points': 'LoyaltyPoints', 'merchant_info': 'MerchantInfo', 'price': 'Price', 'program_eligibility': 'ProgramEligibility', 'promotions': 'Promotions', 'saving_basis': 'SavingBasis', 'violates_map': 'ViolatesMAP' } def __init__(self, availability=None, condition=None, delivery_info=None, id=None, is_buy_box_winner=None, loyalty_points=None, merchant_info=None, price=None, program_eligibility=None, promotions=None, saving_basis=None, violates_map=None): self._availability = None self._condition = None self._delivery_info = None self._id = None self._is_buy_box_winner = None self._loyalty_points = None self._merchant_info = None self._price = None self._program_eligibility = None self._promotions = None self._saving_basis = None self._violates_map = None self.discriminator = None if availability is not None: self.availability = availability if condition is not None: self.condition = condition if delivery_info is not None: self.delivery_info = delivery_info if id is not None: self.id = id if is_buy_box_winner is not None: self.is_buy_box_winner = is_buy_box_winner if loyalty_points is not None: self.loyalty_points = loyalty_points if merchant_info is not None: self.merchant_info = merchant_info if price is not None: self.price = price if program_eligibility is not None: self.program_eligibility = program_eligibility if promotions is not None: self.promotions = promotions if saving_basis is not None: self.saving_basis = saving_basis if violates_map is not None: self.violates_map = violates_map @property def availability(self): return self._availability @availability.setter def availability(self, availability): self._availability = availability @property def condition(self): return self._condition @condition.setter def condition(self, condition): self._condition = condition @property def delivery_info(self): return self._delivery_info @delivery_info.setter def delivery_info(self, delivery_info): self._delivery_info = delivery_info @property def id(self): return self._id @id.setter def id(self, id): self._id = id @property def is_buy_box_winner(self): return self._is_buy_box_winner @is_buy_box_winner.setter def is_buy_box_winner(self, is_buy_box_winner): self._is_buy_box_winner = is_buy_box_winner @property def loyalty_points(self): return self._loyalty_points @loyalty_points.setter def 
loyalty_points(self, loyalty_points): self._loyalty_points = loyalty_points @property def merchant_info(self): return self._merchant_info @merchant_info.setter def merchant_info(self, merchant_info): self._merchant_info = merchant_info @property
MIT License
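The `price` property above is a plain getter over the `_price` attribute populated in `__init__`. A minimal sketch of reading it, assuming `OfferPrice` can be constructed with no arguments (its constructor is not shown in this record):

from amazon.paapi5_python_sdk.offer_listing import OfferListing
from amazon.paapi5_python_sdk.offer_price import OfferPrice

# OfferPrice() with no arguments is an assumption; populate it as the SDK requires.
listing = OfferListing(price=OfferPrice(), is_buy_box_winner=True)
print(listing.price)              # the OfferPrice object stored by the constructor
print(listing.is_buy_box_winner)  # True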
pelioniot/mbed-cloud-sdk-python
src/mbed_cloud/_backends/update_service/models/firmware_image_eq_neq_filter.py
FirmwareImageEqNeqFilter.datafile
python
def datafile(self, datafile): self._datafile = datafile
Sets the datafile of this FirmwareImageEqNeqFilter. :param datafile: The datafile of this FirmwareImageEqNeqFilter. :type: str
https://github.com/pelioniot/mbed-cloud-sdk-python/blob/71dc67fc2a8d1aff31e35ec781fb328e6a60639c/src/mbed_cloud/_backends/update_service/models/firmware_image_eq_neq_filter.py#L105-L113
from pprint import pformat from six import iteritems import re class FirmwareImageEqNeqFilter(object): """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'created_at': 'datetime', 'datafile': 'str', 'datafile_checksum': 'str', 'datafile_size': 'int', 'description': 'str', 'etag': 'datetime', 'id': 'str', 'name': 'str', 'updated_at': 'datetime' } attribute_map = { 'created_at': 'created_at', 'datafile': 'datafile', 'datafile_checksum': 'datafile_checksum', 'datafile_size': 'datafile_size', 'description': 'description', 'etag': 'etag', 'id': 'id', 'name': 'name', 'updated_at': 'updated_at' } def __init__(self, created_at=None, datafile=None, datafile_checksum=None, datafile_size=None, description=None, etag=None, id=None, name=None, updated_at=None): self._created_at = created_at self._datafile = datafile self._datafile_checksum = datafile_checksum self._datafile_size = datafile_size self._description = description self._etag = etag self._id = id self._name = name self._updated_at = updated_at self.discriminator = None @property def created_at(self): return self._created_at @created_at.setter def created_at(self, created_at): self._created_at = created_at @property def datafile(self): return self._datafile @datafile.setter
Apache License 2.0
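The `datafile` setter simply stores the given string on `_datafile`. A sketch of building a filter and assigning the field; the import path is inferred from the file path in this record, and the name and URL are illustrative placeholders:

from mbed_cloud._backends.update_service.models.firmware_image_eq_neq_filter import (
    FirmwareImageEqNeqFilter,
)

# Placeholder values; point datafile at a real firmware artifact in practice.
image_filter = FirmwareImageEqNeqFilter(name="stable-image")
image_filter.datafile = "https://example.com/firmware.bin"
print(image_filter.datafile)   # "https://example.com/firmware.bin"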
autorope/donkeycar
donkeycar/parts/keras.py
conv2d
python
def conv2d(filters, kernel, strides, layer_num, activation='relu'): return Convolution2D(filters=filters, kernel_size=(kernel, kernel), strides=(strides, strides), activation=activation, name='conv2d_' + str(layer_num))
Helper function to create a standard valid-padded convolutional layer with square kernel and strides and unified naming convention :param filters: channel dimension of the layer :param kernel: creates (kernel, kernel) kernel matrix dimension :param strides: creates (strides, strides) stride :param layer_num: used in labelling the layer :param activation: activation, defaults to relu :return: tf.keras Convolution2D layer
https://github.com/autorope/donkeycar/blob/688204ca074886321e0d58e75d81d89f04f7a2b6/donkeycar/parts/keras.py#L907-L923
from abc import ABC, abstractmethod from collections import deque import numpy as np from typing import Dict, Tuple, Optional, Union, List, Sequence, Callable from logging import getLogger from tensorflow.python.data.ops.dataset_ops import DatasetV1, DatasetV2 import donkeycar as dk from donkeycar.utils import normalize_image, linear_bin from donkeycar.pipeline.types import TubRecord from donkeycar.parts.interpreter import Interpreter, KerasInterpreter import tensorflow as tf from tensorflow import keras from tensorflow.keras.layers import Input, Dense from tensorflow.keras.layers import Convolution2D, MaxPooling2D, BatchNormalization from tensorflow.keras.layers import Activation, Dropout, Flatten from tensorflow.keras.layers import LSTM from tensorflow.keras.layers import TimeDistributed as TD from tensorflow.keras.layers import Conv3D, MaxPooling3D, Conv2DTranspose from tensorflow.keras.backend import concatenate from tensorflow.keras.models import Model from tensorflow.python.keras.callbacks import EarlyStopping, ModelCheckpoint ONE_BYTE_SCALE = 1.0 / 255.0 XY = Union[float, np.ndarray, Tuple[Union[float, np.ndarray], ...]] logger = getLogger(__name__) class KerasPilot(ABC): def __init__(self, interpreter: Interpreter = KerasInterpreter(), input_shape: Tuple[int, ...] = (120, 160, 3)) -> None: self.input_shape = input_shape self.optimizer = "adam" self.interpreter = interpreter self.interpreter.set_model(self) logger.info(f'Created {self} with interpreter: {interpreter}') def load(self, model_path: str) -> None: logger.info(f'Loading model {model_path}') self.interpreter.load(model_path) def load_weights(self, model_path: str, by_name: bool = True) -> None: self.interpreter.load_weights(model_path, by_name=by_name) def shutdown(self) -> None: pass def compile(self) -> None: pass @abstractmethod def create_model(self): pass def set_optimizer(self, optimizer_type: str, rate: float, decay: float) -> None: if optimizer_type == "adam": optimizer = keras.optimizers.Adam(lr=rate, decay=decay) elif optimizer_type == "sgd": optimizer = keras.optimizers.SGD(lr=rate, decay=decay) elif optimizer_type == "rmsprop": optimizer = keras.optimizers.RMSprop(lr=rate, decay=decay) else: raise Exception(f"Unknown optimizer type: {optimizer_type}") self.interpreter.set_optimizer(optimizer) def get_input_shapes(self) -> List[tf.TensorShape]: return self.interpreter.get_input_shapes() def seq_size(self) -> int: return 0 def run(self, img_arr: np.ndarray, other_arr: List[float] = None) -> Tuple[Union[float, np.ndarray], ...]: norm_arr = normalize_image(img_arr) np_other_array = np.array(other_arr) if other_arr else None return self.inference(norm_arr, np_other_array) def inference(self, img_arr: np.ndarray, other_arr: Optional[np.ndarray]) -> Tuple[Union[float, np.ndarray], ...]: out = self.interpreter.predict(img_arr, other_arr) return self.interpreter_to_output(out) def inference_from_dict(self, input_dict: Dict[str, np.ndarray]) -> Tuple[Union[float, np.ndarray], ...]: output = self.interpreter.predict_from_dict(input_dict) return self.interpreter_to_output(output) @abstractmethod def interpreter_to_output( self, interpreter_out: Sequence[Union[float, np.ndarray]]) -> Tuple[Union[float, np.ndarray], ...]: pass def train(self, model_path: str, train_data: Union[DatasetV1, DatasetV2], train_steps: int, batch_size: int, validation_data: Union[DatasetV1, DatasetV2], validation_steps: int, epochs: int, verbose: int = 1, min_delta: float = .0005, patience: int = 5, show_plot: bool = False) -> 
tf.keras.callbacks.History: assert isinstance(self.interpreter, KerasInterpreter) model = self.interpreter.model self.compile() callbacks = [ EarlyStopping(monitor='val_loss', patience=patience, min_delta=min_delta), ModelCheckpoint(monitor='val_loss', filepath=model_path, save_best_only=True, verbose=verbose)] history: tf.keras.callbacks.History = model.fit( x=train_data, steps_per_epoch=train_steps, batch_size=batch_size, callbacks=callbacks, validation_data=validation_data, validation_steps=validation_steps, epochs=epochs, verbose=verbose, workers=1, use_multiprocessing=False) if show_plot: try: import matplotlib.pyplot as plt from pathlib import Path plt.figure(1) if 'angle_out_acc' in history.history: plt.subplot(121) plt.plot(history.history['loss']) plt.plot(history.history['val_loss']) plt.title('model loss') plt.ylabel('loss') plt.xlabel('epoch') plt.legend(['train', 'validate'], loc='upper right') if 'angle_out_acc' in history.history: plt.subplot(122) plt.plot(history.history['angle_out_acc']) plt.plot(history.history['val_angle_out_acc']) plt.title('model angle accuracy') plt.ylabel('acc') plt.xlabel('epoch') plt.savefig(Path(model_path).with_suffix('.png')) except Exception as ex: print(f"problems with loss graph: {ex}") return history def x_transform(self, record: Union[TubRecord, List[TubRecord]]) -> XY: assert isinstance(record, TubRecord), "TubRecord required" img_arr = record.image(cached=True) return img_arr def x_translate(self, x: XY) -> Dict[str, Union[float, np.ndarray]]: return {'img_in': x} def x_transform_and_process( self, record: Union[TubRecord, List[TubRecord]], img_processor: Callable[[np.ndarray], np.ndarray]) -> XY: x_img = self.x_transform(record) x_process = img_processor(x_img) return x_process def y_transform(self, record: Union[TubRecord, List[TubRecord]]) -> XY: raise NotImplementedError(f'{self} not ready yet for new training ' f'pipeline') def y_translate(self, y: XY) -> Dict[str, Union[float, List[float]]]: raise NotImplementedError(f'{self} not ready yet for new training ' f'pipeline') def output_types(self) -> Tuple[Dict[str, np.typename], ...]: shapes = self.output_shapes() types = tuple({k: tf.float64 for k in d} for d in shapes) return types def output_shapes(self) -> Dict[str, tf.TensorShape]: return {} def __str__(self) -> str: return type(self).__name__ class KerasCategorical(KerasPilot): def __init__(self, interpreter: Interpreter = KerasInterpreter(), input_shape: Tuple[int, ...] 
= (120, 160, 3), throttle_range: float = 0.5): super().__init__(interpreter, input_shape) self.throttle_range = throttle_range def create_model(self): return default_categorical(self.input_shape) def compile(self): self.interpreter.compile( optimizer=self.optimizer, metrics=['accuracy'], loss={'angle_out': 'categorical_crossentropy', 'throttle_out': 'categorical_crossentropy'}, loss_weights={'angle_out': 0.5, 'throttle_out': 0.5}) def interpreter_to_output(self, interpreter_out): angle_binned, throttle_binned = interpreter_out N = len(throttle_binned) throttle = dk.utils.linear_unbin(throttle_binned, N=N, offset=0.0, R=self.throttle_range) angle = dk.utils.linear_unbin(angle_binned) return angle, throttle def y_transform(self, record: Union[TubRecord, List[TubRecord]]) -> XY: assert isinstance(record, TubRecord), "TubRecord expected" angle: float = record.underlying['user/angle'] throttle: float = record.underlying['user/throttle'] angle = linear_bin(angle, N=15, offset=1, R=2.0) throttle = linear_bin(throttle, N=20, offset=0.0, R=self.throttle_range) return angle, throttle def y_translate(self, y: XY) -> Dict[str, Union[float, List[float]]]: assert isinstance(y, tuple), 'Expected tuple' angle, throttle = y return {'angle_out': angle, 'throttle_out': throttle} def output_shapes(self): img_shape = self.get_input_shapes()[0][1:] shapes = ({'img_in': tf.TensorShape(img_shape)}, {'angle_out': tf.TensorShape([15]), 'throttle_out': tf.TensorShape([20])}) return shapes class KerasLinear(KerasPilot): def __init__(self, interpreter: Interpreter = KerasInterpreter(), input_shape: Tuple[int, ...] = (120, 160, 3), num_outputs: int = 2): self.num_outputs = num_outputs super().__init__(interpreter, input_shape) def create_model(self): return default_n_linear(self.num_outputs, self.input_shape) def compile(self): self.interpreter.compile(optimizer=self.optimizer, loss='mse') def interpreter_to_output(self, interpreter_out): steering = interpreter_out[0] throttle = interpreter_out[1] return steering[0], throttle[0] def y_transform(self, record: Union[TubRecord, List[TubRecord]]) -> XY: assert isinstance(record, TubRecord), 'TubRecord expected' angle: float = record.underlying['user/angle'] throttle: float = record.underlying['user/throttle'] return angle, throttle def y_translate(self, y: XY) -> Dict[str, Union[float, List[float]]]: assert isinstance(y, tuple), 'Expected tuple' angle, throttle = y return {'n_outputs0': angle, 'n_outputs1': throttle} def output_shapes(self): img_shape = self.get_input_shapes()[0][1:] shapes = ({'img_in': tf.TensorShape(img_shape)}, {'n_outputs0': tf.TensorShape([]), 'n_outputs1': tf.TensorShape([])}) return shapes class KerasMemory(KerasLinear): def __init__(self, interpreter: Interpreter = KerasInterpreter(), input_shape: Tuple[int, ...] 
= (120, 160, 3), mem_length: int = 3, mem_depth: int = 0, mem_start_speed: float = 0.0): self.mem_length = mem_length self.mem_start_speed = mem_start_speed self.mem_seq = deque([[0, mem_start_speed]] * mem_length) self.mem_depth = mem_depth super().__init__(interpreter, input_shape) def seq_size(self) -> int: return self.mem_length + 1 def create_model(self): return default_memory(self.input_shape, self.mem_length, self.mem_depth, ) def load(self, model_path: str) -> None: super().load(model_path) self.mem_length = self.interpreter.get_input_shapes()[1][1] // 2 self.mem_seq = deque([[0, self.mem_start_speed]] * self.mem_length) logger.info(f'Loaded memory model with mem length {self.mem_length}') def run(self, img_arr: np.ndarray, other_arr: List[float] = None) -> Tuple[Union[float, np.ndarray], ...]: np_mem_arr = np.array(self.mem_seq).reshape((2 * self.mem_length,)) img_arr_norm = normalize_image(img_arr) angle, throttle = super().inference(img_arr_norm, np_mem_arr) self.mem_seq.popleft() self.mem_seq.append([angle, throttle]) return angle, throttle def x_transform(self, records: Union[TubRecord, List[TubRecord]]) -> XY: assert isinstance(records, list), 'List[TubRecord] expected' assert len(records) == self.mem_length + 1, f"Record list of length {self.mem_length} required but " f"{len(records)} was passed" img_arr = records[-1].image(cached=True) mem = [[r.underlying['user/angle'], r.underlying['user/throttle']] for r in records[:-1]] return img_arr, np.array(mem).reshape((2 * self.mem_length,)) def x_translate(self, x: XY) -> Dict[str, Union[float, np.ndarray]]: assert(isinstance(x, tuple)), 'Tuple expected' img_arr, mem = x return {'img_in': img_arr, 'mem_in': mem} def x_transform_and_process( self, record: Union[TubRecord, List[TubRecord]], img_processor: Callable[[np.ndarray], np.ndarray]) -> XY: xt = self.x_transform(record) assert isinstance(xt, tuple), 'Tuple expected' x_img, mem = xt x_process = img_processor(x_img) return x_process, mem def y_transform(self, records: Union[TubRecord, List[TubRecord]]) -> XY: assert isinstance(records, list), 'List[TubRecord] expected' angle = records[-1].underlying['user/angle'] throttle = records[-1].underlying['user/throttle'] return angle, throttle def output_shapes(self): img_shape = self.get_input_shapes()[0][1:] shapes = ({'img_in': tf.TensorShape(img_shape), 'mem_in': tf.TensorShape(2 * self.mem_length)}, {'n_outputs0': tf.TensorShape([]), 'n_outputs1': tf.TensorShape([])}) return shapes def __str__(self) -> str: return super().__str__() + f'-L:{self.mem_length}-D:{self.mem_depth}' class KerasInferred(KerasPilot): def __init__(self, interpreter: Interpreter = KerasInterpreter(), input_shape: Tuple[int, ...] 
= (120, 160, 3)): super().__init__(interpreter, input_shape) def create_model(self): return default_n_linear(1, self.input_shape) def compile(self): self.interpreter.compile(optimizer=self.optimizer, loss='mse') def interpreter_to_output(self, interpreter_out): steering = interpreter_out[0] return steering, dk.utils.throttle(steering) def y_transform(self, record: Union[TubRecord, List[TubRecord]]) -> XY: assert isinstance(record, TubRecord), "TubRecord expected" angle: float = record.underlying['user/angle'] return angle def y_translate(self, y: XY) -> Dict[str, Union[float, List[float]]]: assert isinstance(y, float), 'Float expected' return {'n_outputs0': y} def output_shapes(self): img_shape = self.get_input_shapes()[0][1:] shapes = ({'img_in': tf.TensorShape(img_shape)}, {'n_outputs0': tf.TensorShape([])}) return shapes class KerasIMU(KerasPilot): imu_vec = [f'imu/{f}_{x}' for f in ('acl', 'gyr') for x in 'xyz'] def __init__(self, interpreter: Interpreter = KerasInterpreter(), input_shape: Tuple[int, ...] = (120, 160, 3), num_outputs: int = 2, num_imu_inputs: int = 6): self.num_outputs = num_outputs self.num_imu_inputs = num_imu_inputs super().__init__(interpreter, input_shape) def create_model(self): return default_imu(num_outputs=self.num_outputs, num_imu_inputs=self.num_imu_inputs, input_shape=self.input_shape) def compile(self): self.interpreter.compile(optimizer=self.optimizer, loss='mse') def interpreter_to_output(self, interpreter_out) -> Tuple[Union[float, np.ndarray], ...]: steering = interpreter_out[0] throttle = interpreter_out[1] return steering[0], throttle[0] def x_transform(self, record: Union[TubRecord, List[TubRecord]]) -> XY: assert isinstance(record, TubRecord), 'TubRecord expected' img_arr = record.image(cached=True) imu_arr = [record.underlying[k] for k in self.imu_vec] return img_arr, np.array(imu_arr) def x_transform_and_process( self, record: Union[TubRecord, List[TubRecord]], img_processor: Callable[[np.ndarray], np.ndarray]) -> XY: xt = self.x_transform(record) assert isinstance(xt, tuple), 'Tuple expected' x_img, x_imu = xt x_img_process = img_processor(x_img) return x_img_process, x_imu def x_translate(self, x: XY) -> Dict[str, Union[float, np.ndarray]]: assert isinstance(x, tuple), 'Tuple required' return {'img_in': x[0], 'imu_in': x[1]} def y_transform(self, record: Union[TubRecord, List[TubRecord]]) -> XY: assert isinstance(record, TubRecord), "TubRecord expected" angle: float = record.underlying['user/angle'] throttle: float = record.underlying['user/throttle'] return angle, throttle def y_translate(self, y: XY) -> Dict[str, Union[float, List[float]]]: assert isinstance(y, tuple), 'Expected tuple' angle, throttle = y return {'out_0': angle, 'out_1': throttle} def output_shapes(self): img_shape = self.get_input_shapes()[0][1:] shapes = ({'img_in': tf.TensorShape(img_shape), 'imu_in': tf.TensorShape([self.num_imu_inputs])}, {'out_0': tf.TensorShape([]), 'out_1': tf.TensorShape([])}) return shapes class KerasBehavioral(KerasCategorical): def __init__(self, interpreter: Interpreter = KerasInterpreter(), input_shape: Tuple[int, ...] 
= (120, 160, 3), throttle_range: float = 0.5, num_behavior_inputs: int = 2): self.num_behavior_inputs = num_behavior_inputs super().__init__(interpreter, input_shape, throttle_range) def create_model(self): return default_bhv(num_bvh_inputs=self.num_behavior_inputs, input_shape=self.input_shape) def x_transform(self, record: Union[TubRecord, List[TubRecord]]) -> XY: assert isinstance(record, TubRecord), 'TubRecord expected' img_arr = record.image(cached=True) bhv_arr = record.underlying['behavior/one_hot_state_array'] return img_arr, np.array(bhv_arr) def x_transform_and_process( self, record: Union[TubRecord, List[TubRecord]], img_processor: Callable[[np.ndarray], np.ndarray]) -> XY: xt = self.x_transform(record) assert isinstance(xt, tuple), 'Tuple expected' x_img, bhv_arr = xt x_img_process = img_processor(x_img) return x_img_process, bhv_arr def x_translate(self, x: XY) -> Dict[str, Union[float, np.ndarray]]: assert isinstance(x, tuple), 'Tuple required' return {'img_in': x[0], 'xbehavior_in': x[1]} def output_shapes(self): img_shape = self.get_input_shapes()[0][1:] shapes = ({'img_in': tf.TensorShape(img_shape), 'xbehavior_in': tf.TensorShape([self.num_behavior_inputs])}, {'angle_out': tf.TensorShape([15]), 'throttle_out': tf.TensorShape([20])}) return shapes class KerasLocalizer(KerasPilot): def __init__(self, interpreter: Interpreter = KerasInterpreter(), input_shape: Tuple[int, ...] = (120, 160, 3), num_locations: int = 8): self.num_locations = num_locations super().__init__(interpreter, input_shape) def create_model(self): return default_loc(num_locations=self.num_locations, input_shape=self.input_shape) def compile(self): self.interpreter.compile(optimizer=self.optimizer, metrics=['acc'], loss='mse') def interpreter_to_output(self, interpreter_out) -> Tuple[Union[float, np.ndarray], ...]: angle, throttle, track_loc = interpreter_out loc = np.argmax(track_loc) return angle[0], throttle[0], loc def y_transform(self, record: Union[TubRecord, List[TubRecord]]) -> XY: assert isinstance(record, TubRecord), "TubRecord expected" angle: float = record.underlying['user/angle'] throttle: float = record.underlying['user/throttle'] loc = record.underlying['localizer/location'] loc_one_hot = np.zeros(self.num_locations) loc_one_hot[loc] = 1 return angle, throttle, loc_one_hot def y_translate(self, y: XY) -> Dict[str, Union[float, List[float]]]: assert isinstance(y, tuple), 'Expected tuple' angle, throttle, loc = y return {'angle': angle, 'throttle': throttle, 'zloc': loc} def output_shapes(self): img_shape = self.get_input_shapes()[0][1:] shapes = ({'img_in': tf.TensorShape(img_shape)}, {'angle': tf.TensorShape([]), 'throttle': tf.TensorShape([]), 'zloc': tf.TensorShape([self.num_locations])}) return shapes class KerasLSTM(KerasPilot): def __init__(self, interpreter: Interpreter = KerasInterpreter(), input_shape: Tuple[int, ...] 
= (120, 160, 3), seq_length=3, num_outputs=2): self.num_outputs = num_outputs self.seq_length = seq_length super().__init__(interpreter, input_shape) self.img_seq = deque() self.optimizer = "rmsprop" def seq_size(self) -> int: return self.seq_length def create_model(self): return rnn_lstm(seq_length=self.seq_length, num_outputs=self.num_outputs, input_shape=self.input_shape) def compile(self): self.interpreter.compile(optimizer=self.optimizer, loss='mse') def x_transform(self, records: Union[TubRecord, List[TubRecord]]) -> XY: assert isinstance(records, list), 'List[TubRecord] expected' assert len(records) == self.seq_length, f"Record list of length {self.seq_length} required but " f"{len(records)} was passed" img_arrays = [rec.image(cached=True) for rec in records] return np.array(img_arrays) def x_translate(self, x: XY) -> Dict[str, Union[float, np.ndarray]]: img_arr = x return {'img_in': img_arr} def x_transform_and_process( self, records: Union[TubRecord, List[TubRecord]], img_processor: Callable[[np.ndarray], np.ndarray]) -> XY: img_seq = self.x_transform(records) assert isinstance(img_seq, np.ndarray) x_process = [img_processor(img) for img in img_seq] return np.array(x_process) def y_transform(self, records: Union[TubRecord, List[TubRecord]]) -> XY: assert isinstance(records, list), 'List[TubRecord] expected' angle = records[-1].underlying['user/angle'] throttle = records[-1].underlying['user/throttle'] return angle, throttle def y_translate(self, y: XY) -> Dict[str, Union[float, List[float]]]: assert isinstance(y, tuple), 'Expected tuple' return {'model_outputs': list(y)} def run(self, img_arr, other_arr=None): if img_arr.shape[2] == 3 and self.input_shape[2] == 1: img_arr = dk.utils.rgb2gray(img_arr) while len(self.img_seq) < self.seq_length: self.img_seq.append(img_arr) self.img_seq.popleft() self.img_seq.append(img_arr) new_shape = (self.seq_length, *self.input_shape) img_arr = np.array(self.img_seq).reshape(new_shape) img_arr_norm = normalize_image(img_arr) return self.inference(img_arr_norm, other_arr) def interpreter_to_output(self, interpreter_out) -> Tuple[Union[float, np.ndarray], ...]: steering = interpreter_out[0] throttle = interpreter_out[1] return steering, throttle def output_shapes(self): img_shape = self.get_input_shapes()[0][1:] shapes = ({'img_in': tf.TensorShape(img_shape)}, {'model_outputs': tf.TensorShape([self.num_outputs])}) return shapes def __str__(self) -> str: return f'{super().__str__()}-L:{self.seq_length}' class Keras3D_CNN(KerasPilot): def __init__(self, interpreter: Interpreter = KerasInterpreter(), input_shape: Tuple[int, ...] 
= (120, 160, 3), seq_length=20, num_outputs=2): self.num_outputs = num_outputs self.seq_length = seq_length super().__init__(interpreter, input_shape) self.img_seq = deque() def seq_size(self) -> int: return self.seq_length def create_model(self): return build_3d_cnn(self.input_shape, s=self.seq_length, num_outputs=self.num_outputs) def compile(self): self.interpreter.compile(loss='mse', optimizer=self.optimizer) def x_transform(self, records: Union[TubRecord, List[TubRecord]]) -> XY: assert isinstance(records, list), 'List[TubRecord] expected' assert len(records) == self.seq_length, f"Record list of length {self.seq_length} required but " f"{len(records)} was passed" img_arrays = [rec.image(cached=True) for rec in records] return np.array(img_arrays) def x_translate(self, x: XY) -> Dict[str, Union[float, np.ndarray]]: img_arr = x return {'img_in': img_arr} def x_transform_and_process( self, record: Union[TubRecord, List[TubRecord]], img_processor: Callable[[np.ndarray], np.ndarray]) -> XY: img_seq = self.x_transform(record) assert isinstance(img_seq, np.ndarray), 'Expected np.ndarray' x_process = [img_processor(img) for img in img_seq] return np.array(x_process) def y_transform(self, records: Union[TubRecord, List[TubRecord]]) -> XY: assert isinstance(records, list), 'List[TubRecord] expected' angle = records[-1].underlying['user/angle'] throttle = records[-1].underlying['user/throttle'] return angle, throttle def y_translate(self, y: XY) -> Dict[str, Union[float, List[float]]]: assert isinstance(y, tuple), 'Expected tuple' return {'outputs': list(y)} def run(self, img_arr, other_arr=None): if img_arr.shape[2] == 3 and self.input_shape[2] == 1: img_arr = dk.utils.rgb2gray(img_arr) while len(self.img_seq) < self.seq_length: self.img_seq.append(img_arr) self.img_seq.popleft() self.img_seq.append(img_arr) new_shape = (self.seq_length, *self.input_shape) img_arr = np.array(self.img_seq).reshape(new_shape) img_arr_norm = normalize_image(img_arr) return self.inference(img_arr_norm, other_arr) def interpreter_to_output(self, interpreter_out) -> Tuple[Union[float, np.ndarray], ...]: steering = interpreter_out[0] throttle = interpreter_out[1] return steering, throttle def output_shapes(self): img_shape = self.get_input_shapes()[0][1:] shapes = ({'img_in': tf.TensorShape(img_shape)}, {'outputs': tf.TensorShape([self.num_outputs])}) return shapes class KerasLatent(KerasPilot): def __init__(self, interpreter: Interpreter = KerasInterpreter(), input_shape: Tuple[int, ...] = (120, 160, 3), num_outputs: int = 2): self.num_outputs = num_outputs super().__init__(interpreter, input_shape) def create_model(self): return default_latent(self.num_outputs, self.input_shape) def compile(self): loss = {"img_out": "mse", "n_outputs0": "mse", "n_outputs1": "mse"} weights = {"img_out": 100.0, "n_outputs0": 2.0, "n_outputs1": 1.0} self.interpreter.compile(optimizer=self.optimizer, loss=loss, loss_weights=weights) def interpreter_to_output(self, interpreter_out) -> Tuple[Union[float, np.ndarray], ...]: steering = interpreter_out[1] throttle = interpreter_out[2] return steering[0][0], throttle[0][0]
MIT License
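`conv2d` is a thin wrapper around Keras `Convolution2D` with square kernels and strides and a `conv2d_<n>` naming convention. A sketch of stacking a couple of these layers into a small functional model; the filter counts and the dense head are illustrative, not the library's default architecture:

import tensorflow as tf
from tensorflow.keras.layers import Input, Flatten, Dense
from donkeycar.parts.keras import conv2d

# Illustrative stack only; donkeycar's default models use deeper stacks than this.
img_in = Input(shape=(120, 160, 3), name='img_in')
x = conv2d(filters=24, kernel=5, strides=2, layer_num=1)(img_in)
x = conv2d(filters=32, kernel=5, strides=2, layer_num=2)(x)
x = Flatten(name='flattened')(x)
steering = Dense(1, activation='linear', name='n_outputs0')(x)
model = tf.keras.Model(inputs=[img_in], outputs=[steering])
model.summary()   # layers appear as conv2d_1, conv2d_2, ...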
rlabbe/filterpy
filterpy/kalman/square_root.py
SquareRootKalmanFilter.Q1_2
python
def Q1_2(self): return self._Q1_2
Sqrt Process uncertainty
https://github.com/rlabbe/filterpy/blob/a437893597957764fb6b415bfb5640bb117f5b99/filterpy/kalman/square_root.py#L281-L283
from __future__ import (absolute_import, division) from copy import deepcopy import numpy as np from numpy import dot, zeros, eye from scipy.linalg import cholesky, qr, pinv from filterpy.common import pretty_str class SquareRootKalmanFilter(object): def __init__(self, dim_x, dim_z, dim_u=0): if dim_z < 1: raise ValueError('dim_x must be 1 or greater') if dim_z < 1: raise ValueError('dim_x must be 1 or greater') if dim_u < 0: raise ValueError('dim_x must be 0 or greater') self.dim_x = dim_x self.dim_z = dim_z self.dim_u = dim_u self.x = zeros((dim_x, 1)) self._P = eye(dim_x) self._P1_2 = eye(dim_x) self._Q = eye(dim_x) self._Q1_2 = eye(dim_x) self.B = 0. self.F = np.eye(dim_x) self.H = np.zeros((dim_z, dim_x)) self._R1_2 = eye(dim_z) self._R = eye(dim_z) self.z = np.array([[None]*self.dim_z]).T self.K = 0. self.S = 0. self.y = zeros((dim_z, 1)) self._I = np.eye(dim_x) self.M = np.zeros((dim_z + dim_x, dim_z + dim_x)) self.x_prior = np.copy(self.x) self._P1_2_prior = np.copy(self._P1_2) self.x_post = np.copy(self.x) self._P1_2_post = np.copy(self._P1_2) def update(self, z, R2=None): if z is None: self.z = np.array([[None]*self.dim_z]).T self.x_post = self.x.copy() self._P1_2_post = np.copy(self._P1_2) return if R2 is None: R2 = self._R1_2 elif np.isscalar(R2): R2 = eye(self.dim_z) * R2 dim_z = self.dim_z M = self.M M[0:dim_z, 0:dim_z] = R2.T M[dim_z:, 0:dim_z] = dot(self.H, self._P1_2).T M[dim_z:, dim_z:] = self._P1_2.T _, self.S = qr(M) self.K = self.S[0:dim_z, dim_z:].T N = self.S[0:dim_z, 0:dim_z].T self.y = z - dot(self.H, self.x) self.x += dot(self.K, pinv(N)).dot(self.y) self._P1_2 = self.S[dim_z:, dim_z:].T self.z = deepcopy(z) self.x_post = self.x.copy() self._P1_2_post = np.copy(self._P1_2) def predict(self, u=0): self.x = dot(self.F, self.x) + dot(self.B, u) _, P2 = qr(np.hstack([dot(self.F, self._P1_2), self._Q1_2]).T) self._P1_2 = P2[:self.dim_x, :self.dim_x].T self.x_prior = np.copy(self.x) self._P1_2_prior = np.copy(self._P1_2) def residual_of(self, z): return z - dot(self.H, self.x) def measurement_of_state(self, x): return dot(self.H, x) @property def Q(self): return dot(self._Q1_2.T, self._Q1_2) @property
MIT License
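`Q1_2` exposes the square-root factor of the process noise, and the `Q` property in the context above reconstructs the full covariance as `Q1_2.T @ Q1_2`. A quick sketch on a freshly constructed filter, where both factors default to identity matrices:

import numpy as np
from filterpy.kalman import SquareRootKalmanFilter

skf = SquareRootKalmanFilter(dim_x=2, dim_z=1)
print(skf.Q1_2)                                    # identity by default
print(np.allclose(skf.Q, skf.Q1_2.T @ skf.Q1_2))   # True: Q is rebuilt from its square root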
sk2/ank_legacy_v2
AutoNetkit/netkit.py
Netkit.connect_to_server
python
def connect_to_server(self): shell = None if self.host and self.username: ssh_link = self.shell if ssh_link != None: return ssh_link shell = pxssh.pxssh() shell.logfile = self.logfile LOG.info( "Connecting to {0}".format(self.host) ) shell.login(self.host, self.username) LOG.info( "Connected to " + self.host ) else: shell = pexpect.spawn (self.shell_type) shell.sendline("uname") shell.logfile = self.logfile shell.setecho(True) i = shell.expect(["Linux", "Darwin", pexpect.EOF, NETKIT_PROMPT]) if i == 0: shell.sendline("ls") elif i == 1: LOG.warn("Specified Netkit host is running Mac OS X, " "please specify a Linux Netkit host.") return None else: LOG.warn("Provided Netkit host is not running Linux") self.shell = shell return
Connects to Netkit server (if remote)
https://github.com/sk2/ank_legacy_v2/blob/83a28aa54a4ea74962ee9a8c44f856a006a2e675/AutoNetkit/netkit.py#L102-L147
__author__ = """\n""".join(['Simon Knight ([email protected])', 'Hung Nguyen ([email protected])']) import config import logging LOG = logging.getLogger("ANK") try: import pexpect import pxssh except ImportError: LOG.error("Netkit deployment requires pexpect") import os import sys from netaddr import IPNetwork NETKIT_PROMPT = "~#" class Netkit: def __init__(self, host=None, username=None, shell_type="bash", tapsn=IPNetwork("172.16.0.0/16")): self.host = host self.username = username self.shell = None self.shell_type = shell_type self.tap_host = tapsn[1] self.tap_ip = tapsn[2] self.NETKIT_PROMPT = NETKIT_PROMPT self.tap_hostname = "taptunnelvm" self.local_server = True if self.host and self.username: self.local_server = False self.logfile = open( os.path.join(config.log_dir, "pxssh.log"), 'w') def get_shell(self): if self.shell: return self.shell else: self.connect_to_server() return self.shell def transfer_file(self, local_file): if self.local_server: LOG.warn("Can only SCP to remote Netkit server") return child = pexpect.spawn("scp {0} {1}@{2}:.".format(local_file, self.username, self.host)) child.logfile = self.logfile child.expect(pexpect.EOF) LOG.debug( "SCP result %s"% child.before.strip()) return
BSD 3-Clause New or Revised License
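`connect_to_server` either logs into a remote Linux host over pxssh or spawns a local shell and checks `uname`, caching the session on `self.shell`. A sketch of the remote path; the host and username are placeholders, and key-based SSH login to a Linux Netkit server is assumed:

from AutoNetkit.netkit import Netkit

# Placeholder host/user; pxssh key-based login is assumed.
nk = Netkit(host="netkit.example.org", username="alice")
nk.connect_to_server()      # logs in and stores the pxssh session on nk.shell
shell = nk.get_shell()      # returns the cached session on subsequent calls
shell.sendline("uname -a")  # further Netkit commands go through this shell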
deepmind/xmanager
xmanager/xm/core.py
Experiment.add
python
def add(self, job, args=immutabledict.immutabledict(), role=WorkUnitRole()): experiment_unit = self._create_experiment_unit(args, role) async def launch(): await experiment_unit.add(job, args) return experiment_unit return asyncio.wrap_future(self._create_task(launch()))
Adds a Job / JobGroup to the experiment. A new Experiment Unit is created to run the job. Args: job: A Job or JobGroup to add. args: Keyword arguments to be passed to the job. For Job and JobGroup args are recursively expanded. For example, ``` wu.add( JobGroup(agent=Job(...)), args={'agent': {'args': {'learning_rate': 0.1}}}, ) ``` would update `args` field of a job `agent` in the group. role: The role of this unit in the experiment structure. Returns: An awaitable that would be fulfilled when the job is launched.
https://github.com/deepmind/xmanager/blob/4963986b77228bed72afcb6ada7008a7eb3a1393/xmanager/xm/core.py#L541-L572
import abc import asyncio from concurrent import futures import functools import getpass import inspect import queue import threading from typing import Any, Awaitable, Callable, Dict, Mapping, Optional, Sequence, overload import attr import immutabledict from xmanager.xm import async_packager from xmanager.xm import id_predictor from xmanager.xm import job_blocks from xmanager.xm import job_operators from xmanager.xm import metadata_context from xmanager.xm import pattern_matching def _apply_args_to_job(job: job_blocks.Job, args: Mapping[str, Any]) -> None: if 'args' in args: job.args = job_blocks.merge_args(job.args, args['args']) job.env_vars.update(args.get('env_vars', {})) def _apply_args_to_job_group(job_group: job_blocks.JobGroup, args: Mapping[str, Any]) -> None: if args: for key, job in job_group.jobs.items(): _apply_args(job, args.get(key, {})) _apply_args = pattern_matching.match( _apply_args_to_job, _apply_args_to_job_group, pattern_matching.Case([job_blocks.JobGeneratorType, Any], lambda other, args: None)) class ExperimentUnitStatus(abc.ABC): @property @abc.abstractmethod def is_active(self) -> bool: raise NotImplementedError @property @abc.abstractmethod def is_completed(self) -> bool: raise NotImplementedError @property @abc.abstractmethod def is_failed(self) -> bool: raise NotImplementedError @property @abc.abstractmethod def message(self) -> str: raise NotImplementedError class ExperimentUnitError(RuntimeError): class ExperimentUnitFailedError(ExperimentUnitError): class ExperimentUnitNotCompletedError(ExperimentUnitError): class NotFoundError(KeyError): def _work_unit_arguments( job: job_blocks.JobType, args: Mapping[str, Any], ) -> Mapping[str, Any]: if args: return args def deduce_args_for_job(job: job_blocks.Job) -> Dict[str, Any]: args = { 'args': job.args.to_dict(kwargs_only=True), 'env_vars': job.env_vars } return {key: value for key, value in args.items() if value} def deduce_args_for_job_group(group: job_blocks.JobGroup) -> Dict[str, Any]: args = {} for job_name, job in group.jobs.items(): job_args = deduce_args(job) if job_args: args[job_name] = job_args return args deduce_args = pattern_matching.match( deduce_args_for_job, deduce_args_for_job_group, pattern_matching.Case([job_blocks.JobGeneratorType], lambda generator: {})) return deduce_args(job) class ExperimentUnitRole(abc.ABC): pass class ExperimentUnit(abc.ABC): def __init__(self, experiment: 'Experiment', create_task: Callable[[Awaitable[Any]], futures.Future], args: Mapping[str, Any], role: ExperimentUnitRole) -> None: self._experiment = experiment self._create_task = create_task self._args = args self._role = role self._launched_error = None @property def experiment_id(self) -> int: return self._experiment.experiment_id @property @functools.lru_cache() def _launched_event(self) -> asyncio.Event: return asyncio.Event() def add( self, job: job_blocks.JobType, args: Mapping[str, Any] = immutabledict.immutabledict() ) -> Awaitable[None]: job = job_operators.shallow_copy_job_type(job) _apply_args(job, args) job_operators.populate_job_names(job) def launch_job(job: job_blocks.Job) -> Awaitable[None]: return self._launch_job_group( job_blocks.JobGroup(**{job.name: job}), _work_unit_arguments(job, self._args)) def launch_job_group(group: job_blocks.JobGroup) -> Awaitable[None]: return self._launch_job_group(group, _work_unit_arguments(group, self._args)) def launch_job_generator( job_generator: job_blocks.JobGeneratorType) -> Awaitable[None]: if not inspect.iscoroutinefunction(job_generator): raise ValueError( 
'Job generator must be an async function. Signature needs to be ' '`async def job_generator(work_unit: xm.WorkUnit):`') return job_generator(self, **args) job_awaitable = pattern_matching.match(launch_job, launch_job_group, launch_job_generator)( job) async def launch(): try: await job_awaitable except Exception as e: self._launched_error = ExperimentUnitError(e) raise finally: self._launched_event.set() return asyncio.wrap_future(self._create_task(launch())) async def wait_until_complete(self): await self._launched_event.wait() if self._launched_error: raise self._launched_error await self._wait_until_complete() async def _launch_job_group(self, job_group: job_blocks.JobGroup, args_view: Mapping[str, Any]) -> None: raise NotImplementedError async def _wait_until_complete(self) -> None: raise NotImplementedError def stop(self) -> None: raise NotImplementedError def get_status(self) -> ExperimentUnitStatus: raise NotImplementedError @property @abc.abstractmethod def experiment_unit_name(self) -> str: raise NotImplementedError def get_full_job_name(self, job_name: str) -> str: return f'{self.experiment_unit_name}_{job_name}' @property def context(self) -> metadata_context.MetadataContext: return metadata_context.MetadataContext( creator=getpass.getuser(), annotations=metadata_context.ContextAnnotations()) @attr.s(auto_attribs=True) class WorkUnitRole(ExperimentUnitRole): class WorkUnit(ExperimentUnit): @property @abc.abstractmethod def work_unit_id(self) -> int: raise NotImplementedError @attr.s(auto_attribs=True) class AuxiliaryUnitRole(ExperimentUnitRole): termination_delay_secs: int class Experiment(abc.ABC): _event_loop: asyncio.AbstractEventLoop _running_tasks: queue.Queue _work_unit_id_predictor: id_predictor.Predictor _async_packager: async_packager.AsyncPackager @property def experiment_id(self) -> int: raise NotImplementedError def _enter(self) -> None: self._running_tasks = queue.Queue() self._work_unit_id_predictor = id_predictor.Predictor(1 + self.work_unit_count) def __enter__(self): if asyncio.get_event_loop().is_running(): raise RuntimeError('When using Experiment from a coroutine plase use ' '`async with` syntax') self._event_loop = asyncio.new_event_loop() asyncio.get_child_watcher().attach_loop(self._event_loop) self._event_loop_thread = threading.Thread( target=self._event_loop.run_forever, daemon=True) self._event_loop_thread.start() self._enter() return self def _wait_for_tasks(self): while not self._running_tasks.empty(): self._running_tasks.get_nowait().result() def __exit__(self, exc_type, exc_value, traceback): self._wait_for_tasks() self._event_loop.call_soon_threadsafe(self._event_loop.stop) self._event_loop_thread.join() async def __aenter__(self): self._event_loop = asyncio.get_event_loop() self._enter() return self async def _await_for_tasks(self): while not self._running_tasks.empty(): await asyncio.wrap_future(self._running_tasks.get_nowait()) async def __aexit__(self, exc_type, exc_value, traceback): await self._await_for_tasks() @classmethod def package( cls, packageables: Sequence[job_blocks.Packageable] = () ) -> Sequence[job_blocks.Executable]: return cls._async_packager.package(packageables) @classmethod def package_async( cls, packageable: job_blocks.Packageable) -> Awaitable[job_blocks.Executable]: return cls._async_packager.add(packageable) @overload def add(self, job: job_blocks.JobType, args: Mapping[str, Any] = ..., role: WorkUnitRole = ...) -> Awaitable[WorkUnit]: ... 
@overload def add(self, job: job_blocks.JobType, args: Mapping[str, Any], role: ExperimentUnitRole) -> Awaitable[ExperimentUnit]: ... @overload def add( self, job: job_blocks.JobType, args: Mapping[str, Any] = ..., *, role: ExperimentUnitRole ) -> Awaitable[ExperimentUnit]: ...
Apache License 2.0
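`Experiment.add` wraps the launch in an asyncio task and returns an awaitable experiment unit. A sketch of awaiting it from a coroutine; `executable` and `executor` are assumed to come from a concrete backend's packaging step and are left as parameters here, and the learning-rate argument mirrors the args expansion described in the docstring:

from xmanager import xm

# "executable" and "executor" must be produced by a real backend; they are not constructed here.
async def launch_one(experiment: xm.Experiment, executable, executor):
    work_unit = await experiment.add(
        xm.Job(executable=executable, executor=executor),
        args={'args': {'learning_rate': 0.1}},
    )
    await work_unit.wait_until_complete()   # raises if the unit failed
    return work_unit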
tensorflow/data-validation
tensorflow_data_validation/utils/stats_util.py
load_stats_binary
python
def load_stats_binary( input_path: Text) -> statistics_pb2.DatasetFeatureStatisticsList: stats_proto = statistics_pb2.DatasetFeatureStatisticsList() stats_proto.ParseFromString(io_util.read_file_to_string( input_path, binary_mode=True)) return stats_proto
Loads a serialized DatasetFeatureStatisticsList proto from a file. Args: input_path: File path from which to load the DatasetFeatureStatisticsList proto. Returns: A DatasetFeatureStatisticsList proto.
https://github.com/tensorflow/data-validation/blob/9855619b40a1c6dab2be3509fa252eaea5120596/tensorflow_data_validation/utils/stats_util.py#L220-L234
from __future__ import absolute_import from __future__ import division from __future__ import print_function import logging from typing import Dict, Optional, Text, Union import numpy as np import pyarrow as pa import tensorflow as tf from tensorflow_data_validation import types from tensorflow_data_validation.arrow import arrow_util from tensorflow_data_validation.utils import io_util from google.protobuf import text_format from tensorflow_metadata.proto.v0 import statistics_pb2 _NP_DTYPE_KIND_TO_FEATURE_TYPE = { 'f': statistics_pb2.FeatureNameStatistics.FLOAT, 'i': statistics_pb2.FeatureNameStatistics.INT, 'u': statistics_pb2.FeatureNameStatistics.INT, 'S': statistics_pb2.FeatureNameStatistics.STRING, 'O': statistics_pb2.FeatureNameStatistics.STRING, 'U': statistics_pb2.FeatureNameStatistics.STRING, } DOMAIN_INFO = 'domain_info' def maybe_get_utf8(value: bytes) -> Optional[Text]: try: decoded_value = value.decode('utf-8') except UnicodeError: return None return decoded_value def get_feature_type( dtype: np.dtype) -> Optional[types.FeatureNameStatisticsType]: return _NP_DTYPE_KIND_TO_FEATURE_TYPE.get(dtype.kind) def get_feature_type_from_arrow_type( feature_path: types.FeaturePath, arrow_type: pa.DataType) -> Optional[types.FeatureNameStatisticsType]: if pa.types.is_null(arrow_type): return None if not arrow_util.is_list_like(arrow_type): raise TypeError('Expected feature column to be a ' '(Large)List<primitive|struct> or null, but feature {} ' 'was {}.'.format(feature_path, arrow_type)) value_type = arrow_util.get_innermost_nested_type(arrow_type) if pa.types.is_integer(value_type): return statistics_pb2.FeatureNameStatistics.INT elif pa.types.is_floating(value_type): return statistics_pb2.FeatureNameStatistics.FLOAT elif arrow_util.is_binary_like(value_type): return statistics_pb2.FeatureNameStatistics.STRING elif pa.types.is_struct(value_type): return statistics_pb2.FeatureNameStatistics.STRUCT elif pa.types.is_null(value_type): return None raise TypeError('Feature {} has unsupported arrow type: {}'.format( feature_path, arrow_type)) def make_dataset_feature_stats_proto( stats_values: Dict[types.FeaturePath, Dict[Text, float]] ) -> statistics_pb2.DatasetFeatureStatistics: result = statistics_pb2.DatasetFeatureStatistics() feature_paths = sorted(stats_values.keys()) for feature_path in feature_paths: feature_stats_proto = _make_feature_stats_proto(stats_values[feature_path], feature_path) new_feature_stats_proto = result.features.add() new_feature_stats_proto.CopyFrom(feature_stats_proto) return result def _make_feature_stats_proto( stats_values: Dict[Text, float], feature_path: types.FeaturePath) -> statistics_pb2.FeatureNameStatistics: result = statistics_pb2.FeatureNameStatistics() result.path.CopyFrom(feature_path.to_proto()) stat_names = sorted(stats_values.keys()) for stat_name in stat_names: result.custom_stats.add(name=stat_name, num=stats_values[stat_name]) return result def write_stats_text(stats: statistics_pb2.DatasetFeatureStatisticsList, output_path: Text) -> None: if not isinstance(stats, statistics_pb2.DatasetFeatureStatisticsList): raise TypeError( 'stats is of type %s, should be a ' 'DatasetFeatureStatisticsList proto.' 
% type(stats).__name__) stats_proto_text = text_format.MessageToString(stats) io_util.write_string_to_file(output_path, stats_proto_text) def load_stats_text( input_path: Text) -> statistics_pb2.DatasetFeatureStatisticsList: stats_proto = statistics_pb2.DatasetFeatureStatisticsList() stats_text = io_util.read_file_to_string(input_path) text_format.Parse(stats_text, stats_proto) return stats_proto
Apache License 2.0
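`load_stats_binary` is the binary counterpart of `load_stats_text` shown in the context: it reads a file and parses it into a `DatasetFeatureStatisticsList`. A sketch with a placeholder path; the file is expected to hold the serialized output of a previous statistics run:

from tensorflow_data_validation.utils import stats_util

# "/tmp/train_stats.binpb" is a placeholder; point it at a real serialized stats file.
stats = stats_util.load_stats_binary('/tmp/train_stats.binpb')
for dataset in stats.datasets:
    print(dataset.num_examples, len(dataset.features))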
numan/py-analytics
analytics/backends/base.py
BaseAnalyticsBackend.get_metrics
python
def get_metrics(self, metric_identifiers, from_date, limit=10, group_by="week", **kwargs): raise NotImplementedError()
Retrieves multiple metrics as efficiently as possible. :param metric_identifiers: a list of tuples of the form `(unique_identifier, metric_name)` identifying which metrics to retrieve. For example [('user:1', 'people_invited',), ('user:2', 'people_invited',), ('user:1', 'comments_posted',), ('user:2', 'comments_posted',)] :param from_date: A Python date object :param limit: The total number of periods to retrieve starting from ``from_date`` :param group_by: The type of aggregation to perform on the metric. Choices are: ``day``, ``week`` or ``month``
https://github.com/numan/py-analytics/blob/abbc814925c6cc200b3329c7de9f1868e1cb8c01/analytics/backends/base.py#L91-L101
class BaseAnalyticsBackend(object): _analytics_backend = None _prefix = "_analytics" def __init__(self, settings, **kwargs): if "prefix" in kwargs: self._prefix = kwargs.get("prefix") def track_count(self, unique_identifier, metric, inc_amt=1, **kwargs): raise NotImplementedError() def track_metric(self, unique_identifier, metric, date, inc_amt=1, **kwargs): raise NotImplementedError() def get_metric_by_day(self, unique_identifier, metric, from_date, limit=10, **kwargs): raise NotImplementedError() def get_metric_by_week(self, unique_identifier, metric, from_date, limit=10, **kwargs): raise NotImplementedError() def get_metric_by_month(self, unique_identifier, metric, from_date, limit=10, **kwargs): raise NotImplementedError()
Apache License 2.0
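`get_metrics` is abstract on the base backend, so any concrete backend has to aggregate the requested `(unique_identifier, metric_name)` pairs itself. The minimal illustrative subclass below satisfies the signature but just returns zeroed series; the return shape is an assumption, since the base class does not prescribe one:

import datetime
from analytics.backends.base import BaseAnalyticsBackend

class InMemoryBackend(BaseAnalyticsBackend):
    """Illustrative stub; a real backend would read persisted counters."""
    def get_metrics(self, metric_identifiers, from_date, limit=10,
                    group_by="week", **kwargs):
        # One zeroed series per requested (identifier, metric) pair; the shape is assumed.
        return [(uid, metric, [0] * limit) for uid, metric in metric_identifiers]

backend = InMemoryBackend(settings={})
series = backend.get_metrics(
    [("user:1", "people_invited"), ("user:2", "comments_posted")],
    datetime.date.today(), limit=4, group_by="week")
print(series)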
imicknl/ha-tahoma
custom_components/tahoma/climate_devices/somfy_thermostat.py
SomfyThermostat.async_set_hvac_mode
python
async def async_set_hvac_mode(self, hvac_mode: str) -> None: if hvac_mode == self.hvac_mode: return if hvac_mode == HVAC_MODE_AUTO: self._saved_target_temp = self.target_temperature await self.executor.async_execute_command(COMMAND_EXIT_DEROGATION) await self.executor.async_execute_command(COMMAND_REFRESH_STATE) elif hvac_mode == HVAC_MODE_HEAT: await self.async_set_preset_mode(PRESET_NONE)
Set new target hvac mode.
https://github.com/imicknl/ha-tahoma/blob/a0490949f7f416a59019582459aa70c1de108258/custom_components/tahoma/climate_devices/somfy_thermostat.py#L223-L232
import logging from typing import Optional from homeassistant.components.climate import ClimateEntity from homeassistant.components.climate.const import ( CURRENT_HVAC_HEAT, CURRENT_HVAC_IDLE, HVAC_MODE_AUTO, HVAC_MODE_HEAT, PRESET_AWAY, PRESET_HOME, PRESET_NONE, SUPPORT_PRESET_MODE, SUPPORT_TARGET_TEMPERATURE, ) from homeassistant.const import ( ATTR_TEMPERATURE, EVENT_HOMEASSISTANT_START, STATE_UNAVAILABLE, STATE_UNKNOWN, TEMP_CELSIUS, ) from homeassistant.core import callback from homeassistant.helpers.event import async_track_state_change from ..coordinator import OverkizDataUpdateCoordinator from ..entity import OverkizEntity _LOGGER = logging.getLogger(__name__) COMMAND_EXIT_DEROGATION = "exitDerogation" COMMAND_REFRESH_STATE = "refreshState" COMMAND_SET_DEROGATION = "setDerogation" COMMAND_SET_MODE_TEMPERATURE = "setModeTemperature" CORE_DEROGATED_TARGET_TEMPERATURE_STATE = "core:DerogatedTargetTemperatureState" CORE_DEROGATION_ACTIVATION_STATE = "core:DerogationActivationState" PRESET_FREEZE = "Freeze" PRESET_NIGHT = "Night" ST_HEATING_MODE_STATE = "somfythermostat:HeatingModeState" ST_DEROGATION_HEATING_MODE_STATE = "somfythermostat:DerogationHeatingModeState" STATE_DEROGATION_FURTHER_NOTICE = "further_notice" STATE_DEROGATION_ACTIVE = "active" STATE_DEROGATION_INACTIVE = "inactive" STATE_PRESET_AT_HOME = "atHomeMode" STATE_PRESET_AWAY = "awayMode" STATE_PRESET_FREEZE = "freezeMode" STATE_PRESET_MANUAL = "manualMode" STATE_PRESET_SLEEPING_MODE = "sleepingMode" STATE_PRESET_SUDDEN_DROP_MODE = "suddenDropMode" MAP_HVAC_MODES = { STATE_DEROGATION_ACTIVE: HVAC_MODE_HEAT, STATE_DEROGATION_INACTIVE: HVAC_MODE_AUTO, } MAP_PRESET_MODES = { STATE_PRESET_AT_HOME: PRESET_HOME, STATE_PRESET_AWAY: PRESET_AWAY, STATE_PRESET_FREEZE: PRESET_FREEZE, STATE_PRESET_MANUAL: PRESET_NONE, STATE_PRESET_SLEEPING_MODE: PRESET_NIGHT, STATE_PRESET_SUDDEN_DROP_MODE: PRESET_NONE, } MAP_REVERSE_PRESET_MODES = {v: k for k, v in MAP_PRESET_MODES.items()} MAP_PRESET_TEMPERATURES = { PRESET_HOME: "somfythermostat:AtHomeTargetTemperatureState", PRESET_AWAY: "somfythermostat:AwayModeTargetTemperatureState", PRESET_FREEZE: "somfythermostat:FreezeModeTargetTemperatureState", PRESET_NIGHT: "somfythermostat:SleepingModeTargetTemperatureState", } class SomfyThermostat(OverkizEntity, ClimateEntity): _attr_temperature_unit = TEMP_CELSIUS _attr_supported_features = SUPPORT_PRESET_MODE | SUPPORT_TARGET_TEMPERATURE _attr_hvac_modes = [HVAC_MODE_AUTO, HVAC_MODE_HEAT] _attr_preset_modes = [ PRESET_NONE, PRESET_FREEZE, PRESET_NIGHT, PRESET_AWAY, PRESET_HOME, ] _attr_min_temp = 15.0 _attr_max_temp = 26.0 def __init__(self, device_url: str, coordinator: OverkizDataUpdateCoordinator): super().__init__(device_url, coordinator) self._temp_sensor_entity_id = None if self.hvac_mode == HVAC_MODE_AUTO: if self.preset_mode == PRESET_NONE: self._saved_target_temp = None else: self._saved_target_temp = self.executor.select_state( MAP_PRESET_TEMPERATURES[self.preset_mode] ) else: self._saved_target_temp = self.executor.select_state( CORE_DEROGATED_TARGET_TEMPERATURE_STATE ) self._current_temperature = None async def async_added_to_hass(self): await super().async_added_to_hass() entity_registry = await self.hass.helpers.entity_registry.async_get_registry() self._temp_sensor_entity_id = next( ( entity_id for entity_id, entry in entity_registry.entities.items() if entry.unique_id == f"{self.base_device_url}#2-core:TemperatureState" ), None, ) if self._temp_sensor_entity_id: async_track_state_change( self.hass, self._temp_sensor_entity_id, 
self._async_temp_sensor_changed ) else: _LOGGER.warning( "Temperature sensor could not be found for entity %s", self.name ) @callback def _async_startup(event): if self._temp_sensor_entity_id: temp_sensor_state = self.hass.states.get(self._temp_sensor_entity_id) if temp_sensor_state and temp_sensor_state.state != STATE_UNKNOWN: self.update_temp(temp_sensor_state) self.hass.bus.async_listen_once(EVENT_HOMEASSISTANT_START, _async_startup) self.schedule_update_ha_state(True) async def _async_temp_sensor_changed(self, entity_id, old_state, new_state) -> None: if new_state is None or old_state == new_state: return self.update_temp(new_state) self.schedule_update_ha_state() @callback def update_temp(self, state): if state is None or state.state in [STATE_UNKNOWN, STATE_UNAVAILABLE]: return try: self._current_temperature = float(state.state) except ValueError as ex: _LOGGER.error("Unable to update from sensor: %s", ex) @property def hvac_mode(self) -> str: return MAP_HVAC_MODES[ self.executor.select_state(CORE_DEROGATION_ACTIVATION_STATE) ] @property def hvac_action(self) -> str: if not self.current_temperature or not self.target_temperature: return CURRENT_HVAC_IDLE if self.current_temperature < self.target_temperature: return CURRENT_HVAC_HEAT return CURRENT_HVAC_IDLE @property def preset_mode(self) -> Optional[str]: if self.hvac_mode == HVAC_MODE_AUTO: return MAP_PRESET_MODES[self.executor.select_state(ST_HEATING_MODE_STATE)] return MAP_PRESET_MODES[ self.executor.select_state(ST_DEROGATION_HEATING_MODE_STATE) ] @property def current_temperature(self) -> Optional[float]: return self._current_temperature @property def target_temperature(self): if self.hvac_mode == HVAC_MODE_AUTO: if self.preset_mode == PRESET_NONE: return None return self.executor.select_state(MAP_PRESET_TEMPERATURES[self.preset_mode]) return self.executor.select_state(CORE_DEROGATED_TARGET_TEMPERATURE_STATE) async def async_set_temperature(self, **kwargs) -> None: temperature = kwargs.get(ATTR_TEMPERATURE) if temperature is None: return if temperature < self.min_temp: temperature = self.min_temp elif temperature > self.max_temp: temperature = self.max_temp await self.executor.async_execute_command( COMMAND_SET_DEROGATION, temperature, STATE_DEROGATION_FURTHER_NOTICE ) await self.executor.async_execute_command( COMMAND_SET_MODE_TEMPERATURE, STATE_PRESET_MANUAL, temperature ) await self.executor.async_execute_command(COMMAND_REFRESH_STATE)
MIT License
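Editor's illustrative sketch: the setpoint clamping performed in async_set_temperature above can be reproduced standalone. The 15.0/26.0 limits come from the entity's _attr_min_temp/_attr_max_temp; everything else is hypothetical.

MIN_TEMP = 15.0  # matches _attr_min_temp above
MAX_TEMP = 26.0  # matches _attr_max_temp above

def clamp_temperature(temperature: float) -> float:
    # Requested setpoints outside the thermostat's range are clipped,
    # mirroring the checks in async_set_temperature.
    return min(max(temperature, MIN_TEMP), MAX_TEMP)

print(clamp_temperature(30.0))  # -> 26.0
print(clamp_temperature(10.0))  # -> 15.0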
ganeti/ganeti
lib/mcpu.py
Processor._AcquireLocks
python
def _AcquireLocks(self, level, names, shared, opportunistic, timeout, opportunistic_count=1, request_only=False): self._CheckLocksEnabled() if self._cbs: priority = self._cbs.CurrentPriority() else: priority = None if priority is None: priority = constants.OP_PRIO_DEFAULT if names == locking.ALL_SET: if opportunistic: expand_fns = { locking.LEVEL_CLUSTER: (lambda: [locking.BGL]), locking.LEVEL_INSTANCE: self.cfg.GetInstanceList, locking.LEVEL_NODEGROUP: self.cfg.GetNodeGroupList, locking.LEVEL_NODE: self.cfg.GetNodeList, locking.LEVEL_NODE_RES: self.cfg.GetNodeList, locking.LEVEL_NETWORK: self.cfg.GetNetworkList, } names = expand_fns[level]() else: names = locking.LOCKSET_NAME names = _LockList(names) names.sort() levelname = locking.LEVEL_NAMES[level] locks = ["%s/%s" % (levelname, lock) for lock in list(names)] if not names: logging.debug("Acquiring no locks for (%s) at level %s", self._wconfdcontext, levelname) return [] if shared: request = [[lock, "shared"] for lock in locks] else: request = [[lock, "exclusive"] for lock in locks] if request_only: logging.debug("Lock request for level %s is %s", level, request) return request self.cfg.OutDate() if timeout is None: logging.info("Definitely requesting %s for %s", request, self._wconfdcontext) for r in request: logging.debug("Definite request %s for %s", r, self._wconfdcontext) self.wconfd.Client().UpdateLocksWaiting(self._wconfdcontext, priority, [r]) while True: pending = self.wconfd.Client().HasPendingRequest(self._wconfdcontext) if not pending: break time.sleep(10.0 * random.random()) elif opportunistic: logging.debug("For %ss trying to opportunistically acquire" " at least %d of %s for %s.", timeout, opportunistic_count, locks, self._wconfdcontext) locks = utils.SimpleRetry( lambda l: l != [], self.wconfd.Client().GuardedOpportunisticLockUnion, 2.0, timeout, args=[opportunistic_count, self._wconfdcontext, request]) logging.debug("Managed to get the following locks: %s", locks) if locks == []: raise LockAcquireTimeout() else: self._RequestAndWait(request, timeout) return locks
Acquires locks via the Ganeti lock manager. @type level: int @param level: Lock level @type names: list or string @param names: Lock names @type shared: bool @param shared: Whether the locks should be acquired in shared mode @type opportunistic: bool @param opportunistic: Whether to acquire opportunistically @type timeout: None or float @param timeout: Timeout for acquiring the locks @type request_only: bool @param request_only: do not acquire the locks, just return the request @raise LockAcquireTimeout: In case locks couldn't be acquired in specified amount of time; in this case, locks still might be acquired or a request pending.
https://github.com/ganeti/ganeti/blob/4d21019c72cba4d746f5d17ca22098f4c7682e9c/lib/mcpu.py#L374-L475
import sys import logging import random import time import itertools import traceback from ganeti import opcodes from ganeti import opcodes_base from ganeti import constants from ganeti import errors from ganeti import hooksmaster from ganeti import cmdlib from ganeti import locking from ganeti import utils from ganeti import wconfd sighupReceived = [False] lusExecuting = [0] _OP_PREFIX = "Op" _LU_PREFIX = "LU" class LockAcquireTimeout(Exception): def _CalculateLockAttemptTimeouts(): result = [constants.LOCK_ATTEMPTS_MINWAIT] running_sum = result[0] while running_sum < constants.LOCK_ATTEMPTS_TIMEOUT: timeout = (result[-1] * 1.05) ** 1.25 timeout = min(timeout, constants.LOCK_ATTEMPTS_MAXWAIT) timeout = max(timeout, constants.LOCK_ATTEMPTS_MINWAIT) result.append(timeout) running_sum += timeout return result class LockAttemptTimeoutStrategy(object): __slots__ = [ "_timeouts", "_random_fn", "_time_fn", ] _TIMEOUT_PER_ATTEMPT = _CalculateLockAttemptTimeouts() def __init__(self, _time_fn=time.time, _random_fn=random.random): object.__init__(self) self._timeouts = iter(self._TIMEOUT_PER_ATTEMPT) self._time_fn = _time_fn self._random_fn = _random_fn def NextAttempt(self): try: timeout = next(self._timeouts) except StopIteration: timeout = None if timeout is not None: variation_range = timeout * 0.1 timeout += ((self._random_fn() * variation_range) - (variation_range * 0.5)) return timeout class OpExecCbBase(object): def NotifyStart(self): def NotifyRetry(self): def Feedback(self, *args): def CurrentPriority(self): return None def SubmitManyJobs(self, jobs): raise NotImplementedError def _LUNameForOpName(opname): assert opname.startswith(_OP_PREFIX), "Invalid OpCode name, doesn't start with %s: %s" % (_OP_PREFIX, opname) return _LU_PREFIX + opname[len(_OP_PREFIX):] def _ComputeDispatchTable(): return dict((op, getattr(cmdlib, _LUNameForOpName(op.__name__))) for op in opcodes.OP_MAPPING.values() if op.WITH_LU) def _SetBaseOpParams(src, defcomment, dst): if hasattr(src, "debug_level"): dst.debug_level = src.debug_level if (getattr(dst, "priority", None) is None and hasattr(src, "priority")): dst.priority = src.priority if not getattr(dst, opcodes_base.COMMENT_ATTR, None): dst.comment = defcomment if hasattr(src, constants.OPCODE_REASON): dst.reason = list(getattr(dst, constants.OPCODE_REASON, [])) dst.reason.extend(getattr(src, constants.OPCODE_REASON, [])) def _ProcessResult(submit_fn, op, result): if isinstance(result, cmdlib.ResultWithJobs): for op2 in itertools.chain(*result.jobs): _SetBaseOpParams(op, "Submitted by %s" % op.OP_ID, op2) job_submission = submit_fn(result.jobs) result = result.other assert constants.JOB_IDS_KEY not in result, "Key '%s' found in additional return values" % constants.JOB_IDS_KEY result[constants.JOB_IDS_KEY] = job_submission return result def _FailingSubmitManyJobs(_): raise errors.ProgrammerError("Opcodes processed without callbacks (e.g." 
" queries) can not submit jobs") def _LockList(names): if names == locking.ALL_SET: return names elif isinstance(names, str): return [names] else: return list(names) def _CheckSecretParameters(op): if hasattr(op, "osparams_secret") and op.osparams_secret: for secret_param in op.osparams_secret: if op.osparams_secret[secret_param].Get() == constants.REDACTED: raise errors.OpPrereqError("Please re-submit secret parameters to job.", errors.ECODE_INVAL) class Processor(object): DISPATCH_TABLE = _ComputeDispatchTable() def __init__(self, context, ec_id, enable_locks=True): self._ec_id = ec_id self._cbs = None self.cfg = context.GetConfig(ec_id) self.rpc = context.GetRpc(self.cfg) self.hmclass = hooksmaster.HooksMaster self._enable_locks = enable_locks self.wconfd = wconfd self._wconfdcontext = context.GetWConfdContext(ec_id) def _CheckLocksEnabled(self): if not self._enable_locks: raise errors.ProgrammerError("Attempted to use disabled locks") def _RequestAndWait(self, request, timeout): logging.debug("Trying %ss to request %s for %s", timeout, request, self._wconfdcontext) if self._cbs: priority = self._cbs.CurrentPriority() else: priority = None if priority is None: priority = constants.OP_PRIO_DEFAULT if sighupReceived[0]: logging.warning("Ignoring unexpected SIGHUP") sighupReceived[0] = False self.wconfd.Client().UpdateLocksWaiting(self._wconfdcontext, priority, request) pending = self.wconfd.Client().HasPendingRequest(self._wconfdcontext) if pending: def _HasPending(): if sighupReceived[0]: return self.wconfd.Client().HasPendingRequest(self._wconfdcontext) else: return True pending = utils.SimpleRetry(False, _HasPending, 0.05, timeout) signal = sighupReceived[0] if pending: pending = self.wconfd.Client().HasPendingRequest(self._wconfdcontext) if pending and signal: logging.warning("Ignoring unexpected SIGHUP") sighupReceived[0] = False logging.debug("Finished trying. Pending: %s", pending) if pending: raise LockAcquireTimeout()
BSD 2-Clause Simplified License
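Standalone sketch of the back-off schedule computed by _CalculateLockAttemptTimeouts in the context above; the MINWAIT/MAXWAIT/TIMEOUT constants here are illustrative placeholders, not Ganeti's real values.

LOCK_ATTEMPTS_MINWAIT = 1.0   # placeholder
LOCK_ATTEMPTS_MAXWAIT = 15.0  # placeholder
LOCK_ATTEMPTS_TIMEOUT = 75.0  # placeholder

def calculate_lock_attempt_timeouts():
    result = [LOCK_ATTEMPTS_MINWAIT]
    running_sum = result[0]
    while running_sum < LOCK_ATTEMPTS_TIMEOUT:
        # Each attempt waits a little longer than the previous one,
        # clamped to the [MINWAIT, MAXWAIT] interval.
        timeout = (result[-1] * 1.05) ** 1.25
        timeout = min(timeout, LOCK_ATTEMPTS_MAXWAIT)
        timeout = max(timeout, LOCK_ATTEMPTS_MINWAIT)
        result.append(timeout)
        running_sum += timeout
    return result

print(calculate_lock_attempt_timeouts())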
fxihub/hummingbird
src/backend/euxfel.py
EUxfelTrainTranslator._tr_event_id_sqs_pnccd
python
def _tr_event_id_sqs_pnccd(self, values, obj): timestamp = numpy.array(obj['timestamp.tid'], dtype='int') rec = Record('Timestamp', timestamp, ureg.s) rec.timestamp = [timestamp] values[rec.name] = rec
Translates euxfel train event ID from data source into a hummingbird one
https://github.com/fxihub/hummingbird/blob/0b1bdf5023b92090f31d9bc857e0854a805cf2cd/src/backend/euxfel.py#L326-L333
from __future__ import print_function import os import numpy import datetime, time from pytz import timezone from backend.event_translator import EventTranslator from backend.record import Record, add_record from backend import Worker from . import ureg import logging import ipc import karabo_bridge import numpy from hummingbird import parse_cmdline_args _argparser = None def add_cmdline_args(): global _argparser from utils.cmdline_args import argparser _argparser = argparser MAX_TRAIN_LENGTH = 352 class EUxfelTranslator(object): def __init__(self, state): self.timestamps = None self.library = 'karabo_bridge' cmdline_args = _argparser.parse_args() if 'EuXFEL/DataSource' in state: dsrc = state['EuXFEL/DataSource'] elif('EuXFEL' in state and 'DataSource' in state['EuXFEL']): dsrc = state['EuXFEL']['DataSource'] else: raise ValueError("You need to set the '[EuXFEL][DataSource]'" " in the configuration") self._data_format = "Calib" if 'EuXFEL/DataFormat' in state: self._data_format = state["EuXFEL/DataFormat"] if not self._data_format in ["Calib", "Raw"]: raise ValueError("You need to set the 'EuXFEL/DataFormat'" " in the configuration as 'Calib' or 'Raw'") self._sel_module = None if 'EuXFEL/SelModule' in state: self._sel_module = state['EuXFEL/SelModule'] self._max_train_age = 5 if 'EuXFEL/MaxTrainAge' in state: self._max_train_age = state['EuXFEL/MaxTrainAge'] first_cell = 1 if 'EuXFEL/FirstCell' in state: first_cell = state['EuXFEL/FirstCell'] last_cell = -1 if 'EuXFEL/LastCell' in state: last_cell = state['EuXFEL/LastCell'] + 1 bad_cells = [] if 'EuXFEL/BadCells' in state: bad_cells = list(state['EuXFEL/BadCells']) if 'EuXFEL/SlowSource' in state: slsrc = state['EuXFEL/SlowSource'] else: slsrc = None self._slow_keys = None if 'EuXFEL/SlowKeys' in state: self._slow_keys = list(state['EuXFEL/SlowKeys']) self._slow_update_rate = 1 if 'EuXFEL/SlowUpdate' in state: self._slow_update_rate = int(state['EuXFEL/SlowUpdate']) self._cell_filter = numpy.zeros(MAX_TRAIN_LENGTH, dtype='bool') self._cell_filter[first_cell:last_cell] = True for cell in bad_cells: self._cell_filter[cell] = False self._data_client = karabo_bridge.Client(dsrc) self._slow_cache = None self._slow_last_time = 0 self._slow_client = None if slsrc is not None: self._slow_client = karabo_bridge.Client(slsrc) self._n2c = {} if self._sel_module is None: self._n2c["SPB_DET_AGIPD1M-1/CAL/APPEND_CORRECTED"] = ['photonPixelDetectors', 'eventID'] self._n2c["SPB_DET_AGIPD1M-1/CAL/APPEND_RAW"] = ['photonPixelDetectors', 'eventID'] else: self._n2c["SPB_DET_AGIPD1M-1/DET/%dCH0:xtdf"%self._sel_module] = ['photonPixelDetectors', 'eventID'] self._n2c["SQS_NQS_PNCCD1MP/CAL/PNCCD_FMT-0:output"] = ['photonPixelDetectors', 'eventID'] self._n2c["SA3_XTD10_XGM/XGM/DOOCS:output"] = ['GMD', 'eventID'] self._n2c["SQS_DIGITIZER_UTC1/ADC/1:network"] = ["trace"] self._c2n = {} for k, v in self._n2c.items(): if type(v) is not list: v = [v] for v2 in v: self._c2n[v2] = self._c2n.get(v2, []) self._c2n[v2].append(k) self._s2c = {} if self._sel_module is None: self._s2c["SPB_DET_AGIPD1M-1/CAL/APPEND_CORRECTED"] = "AGIPD" self._s2c["SPB_DET_AGIPD1M-1/CAL/APPEND_RAW"] = "AGIPD" else: self._s2c["SPB_DET_AGIPD1M-1/DET/%dCH0:xtdf"%self._sel_module] = "AGIPD" self._s2c["SQS_NQS_PNCCD1MP/CAL/PNCCD_FMT-0:output"] = "pnCCD" self._s2c["SA3_XTD10_XGM/XGM/DOOCS:output"] = "GMD" def append_slow_data(self, buf, meta): do_update = (time.time() - self._slow_last_time) > self._slow_update_rate if do_update or self._slow_cache is None: self._slow_cache = self._slow_client.next() 
self._slow_last_time = time.time() if self._slow_keys is not None: for k in self._slow_keys: buf[k] = self._slow_cache[0][k] meta[k] = self._slow_cache[1][k] else: for k,v in self._slow_cache[0].items(): buf[k] = v for k,v in self._slow_cache[1].items(): meta[k] = v return buf, meta def next_train(self): buf, meta = self._data_client.next() if(self._slow_client is not None): buf, meta = self.append_slow_data(buf, meta) age = numpy.floor(time.time()) - int(meta[list(meta.keys())[0]]['timestamp.tid']) if age < self._max_train_age: return buf, meta else: return self.next_train() def event_keys(self, evt): native_keys = evt.keys() common_keys = set() for k in native_keys: for c in self._native_to_common(k): common_keys.add(c) return list(common_keys)+['analysis'] def _native_to_common(self, key): if(key in self._n2c): val = self._n2c[key] if type(val) is not list: val = [val] return val else: return [] def event_native_keys(self, evt): return evt.keys() def translate(self, evt, key): values = {} if(key in self._c2n): return self.translate_core(evt, key) elif(key == 'analysis'): return {} elif(key == 'stream'): return {} else: event_keys = evt.keys() values = {} found = False if key in event_keys: obj = evt[key] for subkey in obj.keys(): add_record(values, 'native', '%s[%s]' % (self._s2c[key], subkey), obj[subkey], ureg.ADU) return values else: print('%s not found in event' % (key)) def translate_core(self, evt, key): values = {} for k in self._c2n[key]: if k in evt: if key == 'eventID': self._tr_event_id_sqs_pnccd(values, evt[k]) elif key == 'photonPixelDetectors': self._tr_photon_detector_sqs_pnccd(values, evt[k], k) elif key == 'GMD': self._tr_gmd_sqs_pnccd(values, evt[k], k) elif key == "trace": self._tr_trace_sqs_pnccd(values, evt[k], k) else: raise RuntimeError('%s not yet supported with key %s' % (k, key)) return values class EUxfelTrainTranslator(EUxfelTranslator): def __init__(self, state): EUxfelTranslator.__init__(self, state) def next_event(self): train, metadata = self.next_train() for source, d in metadata.items(): for k,v in d.items(): train[source][k] = v return EventTranslator(train, self) def event_id(self, evt): return self.translate(evt, 'eventID')['Timestamp'].timestamp[0] def train_id(self, evt): return self.translate(evt, 'eventID')['Timestamp'].timestamp def _tr_photon_detector_spb(self, values, obj, evt_key): train_length = numpy.array(obj["image.pulseId"]).shape[-1] cells = self._cell_filter[:train_length] img = obj['image.data'][..., cells] gain = obj['image.gain'][..., cells] if self._sel_module is not None: img = img[numpy.newaxis] assert img.ndim == 4 if self._sel_module is not None: gain = gain[numpy.newaxis] if self._data_format == 'Raw': assert gain.ndim == 4 img = numpy.concatenate((img, gain), axis=0) elif self._data_format == 'Calib': pass else: raise NotImplementedError("DataFormat should be 'Calib' or 'Raw''") add_record(values, 'photonPixelDetectors', self._s2c[evt_key], img, ureg.ADU) def _tr_photon_detector_sqs_pnccd(self, values, obj, evt_key): img = obj['data.image'][...].squeeze() add_record(values, 'photonPixelDetectors', self._s2c[evt_key], img, ureg.ADU) def _tr_event_id_spb(self, values, obj): train_length = numpy.array(obj["image.pulseId"]).shape[-1] cells = self._cell_filter[:train_length] pulseid = numpy.array(obj["image.pulseId"][..., cells], dtype='int') tsec = numpy.array(obj['timestamp.sec'], dtype='int') tfrac = numpy.array(obj['timestamp.frac'], dtype='int') * 1e-18 timestamp = tsec + tfrac + (pulseid / 760.) 
time = numpy.array([datetime.datetime.fromtimestamp(t, tz=timezone('utc')) for t in timestamp]) rec = Record('Timestamp', time, ureg.s) rec.pulseId = pulseid rec.cellId = numpy.array(obj['image.cellId'][..., cells], dtype='int') rec.badCells = numpy.array(obj['image.cellId'][..., ~cells], dtype='int') rec.timestamp = timestamp values[rec.name] = rec
BSD 2-Clause Simplified License
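A small numpy sketch (editor's addition, made-up metadata) of the timestamp arithmetic used by the SPB variant _tr_event_id_spb shown in the context: seconds, plus an attosecond fraction, plus a per-pulse offset of pulseid / 760.

import numpy

# Made-up metadata; the arithmetic mirrors _tr_event_id_spb above.
tsec = 1500000000                          # timestamp.sec
tfrac = 250000000000000000 * 1e-18         # timestamp.frac, attoseconds -> seconds
pulseid = numpy.arange(10)                 # image.pulseId for one train

timestamp = tsec + tfrac + pulseid / 760.  # one timestamp per pulse, in seconds
print(timestamp[:3])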
skype4py/skype4py
Skype4Py/client.py
Client.OpenSearchDialog
python
def OpenSearchDialog(self): self.OpenDialog('SEARCH')
Opens search dialog.
https://github.com/skype4py/skype4py/blob/c48d83f7034109fe46315d45a066126002c6e0d4/Skype4Py/client.py#L222-L225
__docformat__ = 'restructuredtext en' import weakref from enums import * from errors import SkypeError from utils import * class Client(object): def __init__(self, Skype): self._SkypeRef = weakref.ref(Skype) def ButtonPressed(self, Key): self._Skype._DoCommand('BTN_PRESSED %s' % Key) def ButtonReleased(self, Key): self._Skype._DoCommand('BTN_RELEASED %s' % Key) def CreateEvent(self, EventId, Caption, Hint): self._Skype._DoCommand('CREATE EVENT %s CAPTION %s HINT %s' % (tounicode(EventId), quote(tounicode(Caption)), quote(tounicode(Hint)))) return PluginEvent(self._Skype, EventId) def CreateMenuItem(self, MenuItemId, PluginContext, CaptionText, HintText=u'', IconPath='', Enabled=True, ContactType=pluginContactTypeAll, MultipleContacts=False): cmd = 'CREATE MENU_ITEM %s CONTEXT %s CAPTION %s ENABLED %s' % (tounicode(MenuItemId), PluginContext, quote(tounicode(CaptionText)), cndexp(Enabled, 'true', 'false')) if HintText: cmd += ' HINT %s' % quote(tounicode(HintText)) if IconPath: cmd += ' ICON %s' % quote(path2unicode(IconPath)) if MultipleContacts: cmd += ' ENABLE_MULTIPLE_CONTACTS true' if PluginContext == pluginContextContact: cmd += ' CONTACT_TYPE_FILTER %s' % ContactType self._Skype._DoCommand(cmd) return PluginMenuItem(self._Skype, MenuItemId, CaptionText, HintText, Enabled) def Focus(self): self._Skype._Api.allow_focus(self._Skype.Timeout) self._Skype._DoCommand('FOCUS') def Minimize(self): self._Skype._DoCommand('MINIMIZE') def OpenAddContactDialog(self, Username=''): self.OpenDialog('ADDAFRIEND', Username) def OpenAuthorizationDialog(self, Username): self.OpenDialog('AUTHORIZATION', Username) def OpenBlockedUsersDialog(self): self.OpenDialog('BLOCKEDUSERS') def OpenCallHistoryTab(self): self.OpenDialog('CALLHISTORY') def OpenConferenceDialog(self): self.OpenDialog('CONFERENCE') def OpenContactsTab(self): self.OpenDialog('CONTACTS') def OpenDialog(self, Name, *Params): self._Skype._Api.allow_focus(self._Skype.Timeout) params = filter(None, (str(Name),) + Params) self._Skype._DoCommand('OPEN %s' % tounicode(' '.join(params))) def OpenDialpadTab(self): self.OpenDialog('DIALPAD') def OpenFileTransferDialog(self, Username, Folder): self.OpenDialog('FILETRANSFER', Username, 'IN', path2unicode(Folder)) def OpenGettingStartedWizard(self): self.OpenDialog('GETTINGSTARTED') def OpenImportContactsWizard(self): self.OpenDialog('IMPORTCONTACTS') def OpenLiveTab(self): self.OpenDialog('LIVETAB') def OpenMessageDialog(self, Username, Text=u''): self.OpenDialog('IM', Username, tounicode(Text)) def OpenOptionsDialog(self, Page=''): self.OpenDialog('OPTIONS', Page) def OpenProfileDialog(self): self.OpenDialog('PROFILE')
BSD 3-Clause New or Revised License
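Hedged usage sketch: requires a running Skype client and the (Python 2 era) Skype4Py package. Attach() and the Client property are part of the library; the rest is illustrative.

import Skype4Py

skype = Skype4Py.Skype()
skype.Attach()                 # connect to the running Skype instance
client = skype.Client          # the Client wrapper documented above
client.OpenSearchDialog()      # equivalent to client.OpenDialog('SEARCH')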
googleapis/python-compute
google/cloud/compute_v1/services/url_maps/transports/rest.py
UrlMapsRestTransport.patch
python
def patch( self, request: compute.PatchUrlMapRequest, *, metadata: Sequence[Tuple[str, str]] = (), ) -> compute.Operation: body = compute.UrlMap.to_json( request.url_map_resource, including_default_value_fields=False, use_integers_for_enums=False, ) url = "https://{host}/compute/v1/projects/{project}/global/urlMaps/{url_map}".format( host=self._host, project=request.project, url_map=request.url_map, ) query_params = {} if compute.PatchUrlMapRequest.request_id in request: query_params["requestId"] = request.request_id headers = dict(metadata) headers["Content-Type"] = "application/json" response = self._session.patch( url, headers=headers, params=query_params, data=body, ) if response.status_code >= 400: raise core_exceptions.from_http_response(response) return compute.Operation.from_json(response.content, ignore_unknown_fields=True)
r"""Call the patch method over HTTP. Args: request (~.compute.PatchUrlMapRequest): The request object. A request message for UrlMaps.Patch. See the method description for details. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: ~.compute.Operation: Represents an Operation resource. Google Compute Engine has three Operation resources: - `Global </compute/docs/reference/rest/{$api_version}/globalOperations>`__ \* `Regional </compute/docs/reference/rest/{$api_version}/regionOperations>`__ \* `Zonal </compute/docs/reference/rest/{$api_version}/zoneOperations>`__ You can use an operation resource to manage asynchronous API requests. For more information, read Handling API responses. Operations can be global, regional or zonal. - For global operations, use the ``globalOperations`` resource. - For regional operations, use the ``regionOperations`` resource. - For zonal operations, use the ``zonalOperations`` resource. For more information, read Global, Regional, and Zonal Resources. (== resource_for {$api_version}.globalOperations ==) (== resource_for {$api_version}.regionOperations ==) (== resource_for {$api_version}.zoneOperations ==)
https://github.com/googleapis/python-compute/blob/703ac1703bc159dcd81e96759606ad896f125996/google/cloud/compute_v1/services/url_maps/transports/rest.py#L533-L614
import warnings from typing import Callable, Dict, Optional, Sequence, Tuple from google.api_core import gapic_v1 from google.api_core import exceptions as core_exceptions from google.auth import credentials as ga_credentials from google.auth.transport.grpc import SslCredentials import grpc from google.auth.transport.requests import AuthorizedSession from google.cloud.compute_v1.types import compute from .base import UrlMapsTransport, DEFAULT_CLIENT_INFO class UrlMapsRestTransport(UrlMapsTransport): def __init__( self, *, host: str = "compute.googleapis.com", credentials: ga_credentials.Credentials = None, credentials_file: str = None, scopes: Sequence[str] = None, client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, quota_project_id: Optional[str] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, always_use_jwt_access: Optional[bool] = False, ) -> None: super().__init__( host=host, credentials=credentials, client_info=client_info, always_use_jwt_access=always_use_jwt_access, ) self._session = AuthorizedSession( self._credentials, default_host=self.DEFAULT_HOST ) if client_cert_source_for_mtls: self._session.configure_mtls_channel(client_cert_source_for_mtls) self._prep_wrapped_messages(client_info) def aggregated_list( self, request: compute.AggregatedListUrlMapsRequest, *, metadata: Sequence[Tuple[str, str]] = (), ) -> compute.UrlMapsAggregatedList: url = "https://{host}/compute/v1/projects/{project}/aggregated/urlMaps".format( host=self._host, project=request.project, ) query_params = {} if compute.AggregatedListUrlMapsRequest.filter in request: query_params["filter"] = request.filter if compute.AggregatedListUrlMapsRequest.include_all_scopes in request: query_params["includeAllScopes"] = request.include_all_scopes if compute.AggregatedListUrlMapsRequest.max_results in request: query_params["maxResults"] = request.max_results if compute.AggregatedListUrlMapsRequest.order_by in request: query_params["orderBy"] = request.order_by if compute.AggregatedListUrlMapsRequest.page_token in request: query_params["pageToken"] = request.page_token if compute.AggregatedListUrlMapsRequest.return_partial_success in request: query_params["returnPartialSuccess"] = request.return_partial_success headers = dict(metadata) headers["Content-Type"] = "application/json" response = self._session.get(url, headers=headers, params=query_params,) if response.status_code >= 400: raise core_exceptions.from_http_response(response) return compute.UrlMapsAggregatedList.from_json( response.content, ignore_unknown_fields=True ) def delete( self, request: compute.DeleteUrlMapRequest, *, metadata: Sequence[Tuple[str, str]] = (), ) -> compute.Operation: url = "https://{host}/compute/v1/projects/{project}/global/urlMaps/{url_map}".format( host=self._host, project=request.project, url_map=request.url_map, ) query_params = {} if compute.DeleteUrlMapRequest.request_id in request: query_params["requestId"] = request.request_id headers = dict(metadata) headers["Content-Type"] = "application/json" response = self._session.delete(url, headers=headers, params=query_params,) if response.status_code >= 400: raise core_exceptions.from_http_response(response) return compute.Operation.from_json(response.content, ignore_unknown_fields=True) def get( self, request: compute.GetUrlMapRequest, *, metadata: Sequence[Tuple[str, str]] = (), ) -> compute.UrlMap: url = "https://{host}/compute/v1/projects/{project}/global/urlMaps/{url_map}".format( host=self._host, project=request.project, 
url_map=request.url_map, ) query_params = {} headers = dict(metadata) headers["Content-Type"] = "application/json" response = self._session.get(url, headers=headers, params=query_params,) if response.status_code >= 400: raise core_exceptions.from_http_response(response) return compute.UrlMap.from_json(response.content, ignore_unknown_fields=True) def insert( self, request: compute.InsertUrlMapRequest, *, metadata: Sequence[Tuple[str, str]] = (), ) -> compute.Operation: body = compute.UrlMap.to_json( request.url_map_resource, including_default_value_fields=False, use_integers_for_enums=False, ) url = "https://{host}/compute/v1/projects/{project}/global/urlMaps".format( host=self._host, project=request.project, ) query_params = {} if compute.InsertUrlMapRequest.request_id in request: query_params["requestId"] = request.request_id headers = dict(metadata) headers["Content-Type"] = "application/json" response = self._session.post( url, headers=headers, params=query_params, data=body, ) if response.status_code >= 400: raise core_exceptions.from_http_response(response) return compute.Operation.from_json(response.content, ignore_unknown_fields=True) def invalidate_cache( self, request: compute.InvalidateCacheUrlMapRequest, *, metadata: Sequence[Tuple[str, str]] = (), ) -> compute.Operation: body = compute.CacheInvalidationRule.to_json( request.cache_invalidation_rule_resource, including_default_value_fields=False, use_integers_for_enums=False, ) url = "https://{host}/compute/v1/projects/{project}/global/urlMaps/{url_map}/invalidateCache".format( host=self._host, project=request.project, url_map=request.url_map, ) query_params = {} if compute.InvalidateCacheUrlMapRequest.request_id in request: query_params["requestId"] = request.request_id headers = dict(metadata) headers["Content-Type"] = "application/json" response = self._session.post( url, headers=headers, params=query_params, data=body, ) if response.status_code >= 400: raise core_exceptions.from_http_response(response) return compute.Operation.from_json(response.content, ignore_unknown_fields=True) def list( self, request: compute.ListUrlMapsRequest, *, metadata: Sequence[Tuple[str, str]] = (), ) -> compute.UrlMapList: url = "https://{host}/compute/v1/projects/{project}/global/urlMaps".format( host=self._host, project=request.project, ) query_params = {} if compute.ListUrlMapsRequest.filter in request: query_params["filter"] = request.filter if compute.ListUrlMapsRequest.max_results in request: query_params["maxResults"] = request.max_results if compute.ListUrlMapsRequest.order_by in request: query_params["orderBy"] = request.order_by if compute.ListUrlMapsRequest.page_token in request: query_params["pageToken"] = request.page_token if compute.ListUrlMapsRequest.return_partial_success in request: query_params["returnPartialSuccess"] = request.return_partial_success headers = dict(metadata) headers["Content-Type"] = "application/json" response = self._session.get(url, headers=headers, params=query_params,) if response.status_code >= 400: raise core_exceptions.from_http_response(response) return compute.UrlMapList.from_json( response.content, ignore_unknown_fields=True )
Apache License 2.0
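Hedged usage sketch: in normal use this REST transport sits behind the generated UrlMapsClient. Assuming application default credentials are available, a patch call looks roughly like this; the project and URL-map names are placeholders.

from google.cloud import compute_v1

client = compute_v1.UrlMapsClient()
url_map_resource = compute_v1.UrlMap(
    name="my-url-map",
    default_service="global/backendServices/my-backend",  # placeholder
)
operation = client.patch(
    project="my-project",
    url_map="my-url-map",
    url_map_resource=url_map_resource,
)
print(operation.status)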
autonomousvision/data_aggregation
configs/coil_global.py
set_type_of_process
python
def set_type_of_process(process_type, param=None): if _g_conf.PROCESS_NAME == "default": raise RuntimeError(" You should merge with some exp file before setting the type") if process_type == 'train': _g_conf.PROCESS_NAME = process_type elif process_type == "validation": _g_conf.PROCESS_NAME = process_type + '_' + param if process_type == "drive": _g_conf.CITY_NAME = param.split('_')[-1] _g_conf.PROCESS_NAME = process_type + '_' + param create_log(_g_conf.EXPERIMENT_BATCH_NAME, _g_conf.EXPERIMENT_NAME, _g_conf.PROCESS_NAME, _g_conf.LOG_SCALAR_WRITING_FREQUENCY, _g_conf.LOG_IMAGE_WRITING_FREQUENCY) if process_type == "train": if not os.path.exists(os.path.join('_logs', _g_conf.EXPERIMENT_BATCH_NAME, _g_conf.EXPERIMENT_NAME, 'checkpoints') ): os.mkdir(os.path.join('_logs', _g_conf.EXPERIMENT_BATCH_NAME, _g_conf.EXPERIMENT_NAME, 'checkpoints')) if process_type == "validation" or process_type == 'drive': if not os.path.exists(os.path.join('_logs', _g_conf.EXPERIMENT_BATCH_NAME, _g_conf.EXPERIMENT_NAME, _g_conf.PROCESS_NAME + '_csv')): os.mkdir(os.path.join('_logs', _g_conf.EXPERIMENT_BATCH_NAME, _g_conf.EXPERIMENT_NAME, _g_conf.PROCESS_NAME + '_csv')) add_message('Loading', {'ProcessName': _g_conf.EXPERIMENT_GENERATED_NAME, 'FullConfiguration': _g_conf.TRAIN_DATASET_NAME + 'dict'}) _g_conf.immutable(True)
This function is used to set which is the type of the current process, test, train or val and also the details of each since there could be many vals and tests for a single experiment. NOTE: AFTER CALLING THIS FUNCTION, THE CONFIGURATION CLOSES Args: type: Returns:
https://github.com/autonomousvision/data_aggregation/blob/76777156a465cbb77d6d5ab88da8f1812e7ff043/configs/coil_global.py#L151-L208
from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals from ast import literal_eval from coilutils import AttributeDict import copy import numpy as np import os import yaml from configs.namer import generate_name from logger.coil_logger import create_log, add_message _g_conf = AttributeDict() _g_conf.immutable(False) _g_conf.NUMBER_OF_LOADING_WORKERS = 12 _g_conf.FINISH_ON_VALIDATION_STALE = None _g_conf.SENSORS = {'rgb': (3, 88, 200)} _g_conf.MEASUREMENTS = {'float_data': (31)} _g_conf.TARGETS = ['steer', 'throttle', 'brake'] _g_conf.INPUTS = ['speed_module'] _g_conf.INTENTIONS = [] _g_conf.BALANCE_DATA = True _g_conf.STEERING_DIVISION = [0.05, 0.05, 0.1, 0.3, 0.3, 0.1, 0.05, 0.05] _g_conf.PEDESTRIAN_PERCENTAGE = 0 _g_conf.SPEED_DIVISION = [] _g_conf.LABELS_DIVISION = [[0, 2, 5], [3], [4]] _g_conf.BATCH_SIZE = 120 _g_conf.SPLIT = None _g_conf.REMOVE = None _g_conf.AUGMENTATION = None _g_conf.DATA_USED = 'all' _g_conf.USE_NOISE_DATA = True _g_conf.TRAIN_DATASET_NAME = '1HoursW1-3-6-8' _g_conf.LOG_SCALAR_WRITING_FREQUENCY = 2 _g_conf.LOG_IMAGE_WRITING_FREQUENCY = 1000 _g_conf.EXPERIMENT_BATCH_NAME = "eccv" _g_conf.EXPERIMENT_NAME = "default" _g_conf.EXPERIMENT_GENERATED_NAME = None _g_conf.PROCESS_NAME = "None" _g_conf.NUMBER_ITERATIONS = 20000 _g_conf.SAVE_SCHEDULE = range(0, 2000, 200) _g_conf.NUMBER_FRAMES_FUSION = 1 _g_conf.NUMBER_IMAGES_SEQUENCE = 1 _g_conf.SEQUENCE_STRIDE = 1 _g_conf.TEST_SCHEDULE = range(0, 2000, 200) _g_conf.SPEED_FACTOR = 12.0 _g_conf.AUGMENT_LATERAL_STEERINGS = 6 _g_conf.NUMBER_OF_HOURS = 1 _g_conf.WEATHERS = [1, 3, 6, 8] _g_conf.PRELOAD_MODEL_BATCH = None _g_conf.PRELOAD_MODEL_ALIAS = None _g_conf.PRELOAD_MODEL_CHECKPOINT = None _g_conf.TRAIN_SET_EPISODE_LIMIT = 350 _g_conf.SAMPLE = None _g_conf.DART_COVMAT_DATA = "" _g_conf.DART_MODEL_CHECKPOINT = "" _g_conf.PRE_TRAINED_DIRECTORY = "" _g_conf.COIL_DATASET_PATH = "" _g_conf.COVARIANCE_MATRIX_PATH = "" _g_conf.VALIDATION_CHECKPOINT_PATH = "" _g_conf.VALIDATION_CHECKPOINT_ITERATION = "" _g_conf.MODEL_TYPE = 'coil_icra' _g_conf.MODEL_CONFIGURATION = {} _g_conf.PRE_TRAINED = False _g_conf.MAGICAL_SEED = 42 _g_conf.LEARNING_RATE_DECAY_INTERVAL = 50000 _g_conf.LEARNING_RATE_DECAY_LEVEL = 0.5 _g_conf.LEARNING_RATE_THRESHOLD = 1000 _g_conf.LEARNING_RATE = 0.0002 _g_conf.BRANCH_LOSS_WEIGHT = [0.95, 0.95, 0.95, 0.95, 0.05] _g_conf.VARIABLE_WEIGHT = {'Steer': 0.5, 'Gas': 0.45, 'Brake': 0.05} _g_conf.USED_LAYERS_ATT = [] _g_conf.LOSS_FUNCTION = 'L2' _g_conf.IMAGE_CUT = [115, 510] _g_conf.USE_ORACLE = False _g_conf.USE_FULL_ORACLE = False _g_conf.AVOID_STOPPING = False def merge_with_yaml(yaml_filename): global _g_conf with open(yaml_filename, 'r') as f: yaml_file = yaml.load(f) yaml_cfg = AttributeDict(yaml_file) _merge_a_into_b(yaml_cfg, _g_conf) path_parts = os.path.split(yaml_filename) _g_conf.EXPERIMENT_BATCH_NAME = os.path.split(path_parts[-2])[-1] _g_conf.EXPERIMENT_NAME = path_parts[-1].split('.')[-2] _g_conf.EXPERIMENT_GENERATED_NAME = generate_name(_g_conf) def get_names(folder): alias_in_folder = os.listdir(os.path.join('configs', folder)) experiments_in_folder = {} for experiment_alias in alias_in_folder: g_conf.immutable(False) merge_with_yaml(os.path.join('configs', folder, experiment_alias)) experiments_in_folder.update({experiment_alias: g_conf.EXPERIMENT_GENERATED_NAME}) return experiments_in_folder
MIT License
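Sketch of the intended call order, assuming the module also exposes the merged configuration as g_conf (as get_names in the context suggests); the YAML path is a placeholder.

from configs.coil_global import g_conf, merge_with_yaml, set_type_of_process

merge_with_yaml('configs/eccv/experiment_1.yaml')  # placeholder experiment file
set_type_of_process('train')                       # creates log/checkpoint dirs, then freezes g_conf
print(g_conf.PROCESS_NAME)                         # -> 'train'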
pyglet/pyglet
pyglet/text/runlist.py
AbstractRunIterator.__getitem__
python
def __getitem__(self, index):
Get the value at a given index. See the class documentation for examples of valid usage. :Parameters: `index` : int Document position to query. :rtype: object
https://github.com/pyglet/pyglet/blob/b9a63ea179735c8f252ac31d51751bdf8a741c9d/pyglet/text/runlist.py#L262-L272
class _Run: def __init__(self, value, count): self.value = value self.count = count def __repr__(self): return 'Run(%r, %d)' % (self.value, self.count) class RunList: def __init__(self, size, initial): self.runs = [_Run(initial, size)] def insert(self, pos, length): i = 0 for run in self.runs: if i <= pos <= i + run.count: run.count += length i += run.count def delete(self, start, end): i = 0 for run in self.runs: if end - start == 0: break if i <= start <= i + run.count: trim = min(end - start, i + run.count - start) run.count -= trim end -= trim i += run.count self.runs = [r for r in self.runs if r.count > 0] if not self.runs: self.runs = [_Run(run.value, 0)] def set_run(self, start, end, value): if end - start <= 0: return i = 0 start_i = None start_trim = 0 end_i = None end_trim = 0 for run_i, run in enumerate(self.runs): count = run.count if i < start < i + count: start_i = run_i start_trim = start - i if i < end < i + count: end_i = run_i end_trim = end - i i += count if start_i is not None: run = self.runs[start_i] self.runs.insert(start_i, _Run(run.value, start_trim)) run.count -= start_trim if end_i is not None: if end_i == start_i: end_trim -= start_trim end_i += 1 if end_i is not None: run = self.runs[end_i] self.runs.insert(end_i, _Run(run.value, end_trim)) run.count -= end_trim i = 0 for run in self.runs: if start <= i and i + run.count <= end: run.value = value i += run.count last_run = self.runs[0] for run in self.runs[1:]: if run.value == last_run.value: run.count += last_run.count last_run.count = 0 last_run = run self.runs = [r for r in self.runs if r.count > 0] def __iter__(self): i = 0 for run in self.runs: yield i, i + run.count, run.value i += run.count def get_run_iterator(self): return RunIterator(self) def __getitem__(self, index): i = 0 for run in self.runs: if i <= index < i + run.count: return run.value i += run.count if index == i: return self.runs[-1].value raise IndexError def __repr__(self): return str(list(self)) class AbstractRunIterator:
BSD 3-Clause New or Revised License
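Minimal usage sketch based on the RunList class in the context above (pyglet required): a 10-position document where positions 2..4 carry a different value.

from pyglet.text.runlist import RunList

runs = RunList(10, 'plain')        # whole document starts as 'plain'
runs.set_run(2, 5, 'bold')         # positions [2, 5) become 'bold'
print(runs[3])                     # -> 'bold'  (single-position lookup)
for start, end, value in runs:     # iterate (start, end, value) runs
    print(start, end, value)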
maralla/completor.vim
pythonx/completor/__init__.py
Completor.gen_request
python
def gen_request(self, action=b'complete', args=None): req = self.prepare_request(action=action) if req and req[-1] != '\n': req += '\n' return req
Internal wrapper for preparing a request.
https://github.com/maralla/completor.vim/blob/6ca5f498afe5fe9c659751aef54ef7f2fdc62414/pythonx/completor/__init__.py#L373-L379
import importlib import json import logging import os import re import shlex import threading from os.path import expanduser from ._vim import vim_obj as vim from ._vim import vim_expand, vim_tempname, vim_support_popup, vim_action_trigger, vim_in_comment_or_string, vim_daemon_send from ._vim import vim_exists from .compat import integer_types, to_bytes, to_unicode from ._log import config_logging LIMIT = 50 COMMON_LIMIT = 10 _arg_cache = {} _ctx = threading.local() class LogFilter(object): def filter(self, record): return bool(Completor.get_option('debug')) config_logging('completor.LogFilter') logger = logging.getLogger('completor') def get_encoding(): return to_unicode( vim.current.buffer.options['fileencoding'] or vim.options['encoding'] or 'utf-8', 'utf-8') def _unicode(text): encoding = get_encoding() try: return to_unicode(text, encoding) except Exception: return text def _read_args(path): try: with open(path) as f: args = shlex.split(f.read(), comments=True, posix=True) return [os.path.expandvars(a) for a in args] except Exception: return [] class Meta(type): registry = {} def __new__(mcls, name, bases, attrs): cls = type.__new__(mcls, name, bases, attrs) if name not in ('Completor', 'Base'): mcls.registry[to_unicode(cls.filetype, 'utf-8')] = cls() return cls Base = Meta('Base', (object, ), {}) class Unusable(object): def __get__(self, inst, owner): raise RuntimeError('unusable') class Completor(Base): filetype = Unusable() daemon = False sync = False trigger = None ident = re.compile(r'\w+', re.U) meta = None _exited = False def __init__(self): self.input_data = '' self.ft = '' self.ft_orig = '' self.ft_args = {} self.stream_buf = [] def copy_to(self, comp): comp.ft = self.ft comp.ft_orig = self.ft_orig comp.ft_args = self.ft_args comp.input_data = self.input_data @property def current_directory(self): return to_unicode(vim_expand('%:p:h'), 'utf-8') @property def tempname(self): return to_unicode(vim_tempname(), 'utf-8') @property def support_popup(self): return vim_support_popup() == 1 @property def filename(self): return vim.current.buffer.name @property def cursor_word(self): try: return to_unicode(vim_expand('<cword>'), get_encoding()) except vim.error: pass @property def cursor_line(self): try: line, _ = vim.current.window.cursor return to_unicode(vim.current.buffer[line - 1], get_encoding()) except vim.error: pass @property def cursor(self): return vim.current.window.cursor @cursor.setter def cursor(self, value): vim.current.window.cursor = value @staticmethod def get_option(key): option = vim.vars.get('completor_{}'.format(key)) if option and key.endswith('_binary'): option = expanduser(option) return option @property def disabled(self): enable_types = self.get_option('enable_{}'.format(self.filetype)) if isinstance(enable_types, (list, vim.List)): return to_bytes(self.ft) not in enable_types else: disable_types = self.get_option('disable_{}'.format(self.filetype)) if isinstance(disable_types, integer_types): return bool(disable_types) if isinstance(disable_types, (list, vim.List)): return to_bytes(self.ft) in disable_types return False @staticmethod def daemon_send(data): return vim_daemon_send(data) def match(self, input_data): if self.trigger is None: return True if isinstance(self.trigger, str): self.trigger = re.compile(self.trigger, re.X | re.U) return bool(self.trigger.search(input_data)) def format_cmd(self): return '' def get_cmd_info(self, action): if action == b'complete': return vim.Dictionary( cmd=self.format_cmd(), ftype=self.filetype, is_daemon=self.daemon, 
is_sync=self.sync) return vim.Dictionary() def _do_complete(self, data): ret = [] if callable(getattr(self, 'parse', None)): ret.extend(self.parse(data)) else: ret.extend(self.on_complete(data)) if ret and not isinstance(ret[0], (dict, vim.Dictionary)): offset = self.start_column() for i, e in enumerate(ret): ret[i] = {'word': e, 'offset': offset} common = get('common') if not common.is_common(self): if ret and 'offset' not in ret[0]: offset = self.start_column() for item in ret: item['offset'] = offset if len(ret) < LIMIT/2: self.copy_to(common) ret.extend(common.parse(self.input_data)[:COMMON_LIMIT]) if not self.support_popup and ret: offset = ret[0]['offset'] for i, item in enumerate(ret): if item['offset'] != offset: ret = ret[:i] break return ret def on_stream(self, action, msg): for line in msg.split(b'\n'): if not line: continue self.stream_buf.append(line) if self.is_message_end(line): data = self.stream_buf self.stream_buf = [] return self.on_data(action, data) def handle_stream(self, name, action, msg): c = get_current_completer() logger.info("%s %s", c.filetype, name) if c and c.filetype != name: self.stream_buf = [] return res = self.on_stream(action, msg) if res is None: return try: vim_action_trigger(res) except vim.error as e: logger.exception(e) def on_data(self, action, data): action = action.decode('ascii') if not isinstance(data, (list, vim.List)): data = _unicode(data) if action == 'complete': return self._do_complete(data) try: return getattr(self, 'on_' + action)(data) except AttributeError: return [] @staticmethod def find_config_file(file): cwd = os.getcwd() while True: path = os.path.join(cwd, file) if os.path.exists(path): return path dirname = os.path.dirname(cwd) if dirname == cwd: break cwd = dirname def parse_config(self, files): if not isinstance(files, (list, tuple)): files = [files] for f in files: key = '{}-{}'.format(self.filetype, f) arg = _arg_cache.get(key) if arg: return arg if arg is not None: continue path = self.find_config_file(f) arg = [] if path is None else _read_args(path) _arg_cache[key] = arg if arg: return arg return [] def ident_match(self, pat): if not self.input_data: return -1 data = self.input_data index = len(self.input_data) for i in range(index): text = self.input_data[i:] matched = pat.match(text) if matched and matched.end() == len(text): data = self.input_data[:i] break return len(to_bytes(data, get_encoding())) def start_column(self): if not self.ident: return -1 if isinstance(self.ident, str): self.ident = re.compile(self.ident, re.U | re.X) return self.ident_match(self.ident) def request(self): line, _ = self.cursor col = len(self.input_data) return json.dumps({ 'line': line - 1, 'col': col, 'filename': self.filename, 'content': '\n'.join(vim.current.buffer[:]) }) def prepare_request(self, action=b'complete'): if action == b'complete': return self.request() return ''
MIT License
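The wrapper only guarantees a trailing newline on the JSON request built by prepare_request()/request(); here is a standalone illustration of that guarantee (the payload values are made up).

import json

def gen_request(payload: dict) -> str:
    # Mirror of the newline guard in Completor.gen_request above.
    req = json.dumps(payload)
    if req and req[-1] != '\n':
        req += '\n'
    return req

print(repr(gen_request({'line': 0, 'col': 4, 'filename': 'demo.py', 'content': 'impo'})))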
crypto-toolbox/btfxwss
btfxwss/client.py
BtfxWss.trades
python
def trades(self, pair): key = ('trades', pair) return self.queue_processor.trades[key]
Return a queue containing all received trades data. :param pair: :return: Queue()
https://github.com/crypto-toolbox/btfxwss/blob/16827fa6aacb2c0e289aa852bf61a18df6905835/btfxwss/client.py#L122-L129
import logging import time from btfxwss.connection import WebSocketConnection from btfxwss.queue_processor import QueueProcessor log = logging.getLogger(__name__) def is_connected(func): def wrapped(self, *args, **kwargs): if self.conn and self.conn.connected.is_set(): return func(self, *args, **kwargs) else: log.error("Cannot call %s() on unestablished connection!", func.__name__) return None return wrapped class BtfxWss: def __init__(self, key=None, secret=None, log_level=None, **wss_kwargs): self.key = key if key else '' self.secret = secret if secret else '' self.conn = WebSocketConnection(log_level=log_level, **wss_kwargs) self.queue_processor = QueueProcessor(self.conn.q, log_level=log_level) @property def channel_configs(self): return self.conn.channel_configs @property def account(self): return self.queue_processor.account def start(self): self.conn.start() self.queue_processor.start() def stop(self): self.conn.disconnect() self.queue_processor.join() def reset(self): self.conn.reconnect() while not self.conn.connected.is_set(): log.info("reset(): Waiting for connection to be set up..") time.sleep(1) for key in self.channel_configs: self.conn.send(**self.channel_configs[key]) def tickers(self, pair): key = ('ticker', pair) return self.queue_processor.tickers[key] def books(self, pair): key = ('book', pair) return self.queue_processor.books[key] def raw_books(self, pair): key = ('raw_book', pair) return self.queue_processor.raw_books[key]
MIT License
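Hedged usage sketch: subscribe_to_trades follows the library's public API; the trading pair and sleep intervals are placeholders.

import time
from btfxwss import BtfxWss

wss = BtfxWss()
wss.start()
while not wss.conn.connected.is_set():   # wait for the websocket connection
    time.sleep(1)
wss.subscribe_to_trades('BTCUSD')
time.sleep(5)                            # let some trade messages arrive
trades_q = wss.trades('BTCUSD')          # the Queue returned by the method above
while not trades_q.empty():
    print(trades_q.get())
wss.stop()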
griquelme/tidyms
tidyms/lcms.py
_build_roi
python
def _build_roi(roi: _TemporaryRoi, rt: np.ndarray, valid_scan: np.ndarray, start: int, mode: str) -> Roi: first_scan = roi.scan[0] last_scan = roi.scan[-1] size = last_scan + 1 - first_scan mz_tmp = np.ones(size) * np.nan spint_tmp = mz_tmp.copy() scan_index = np.array(roi.scan) - roi.scan[0] mz_tmp[scan_index] = roi.mz spint_tmp[scan_index] = roi.sp start_ind, end_ind = np.searchsorted(valid_scan, [first_scan, last_scan + 1]) scan_tmp = valid_scan[start_ind:end_ind].copy() valid_index = scan_tmp - first_scan mz_tmp = mz_tmp[valid_index] spint_tmp = spint_tmp[valid_index] rt_tmp = rt[scan_tmp - start].copy() assert rt_tmp.size == mz_tmp.size assert rt_tmp.size == spint_tmp.size assert rt_tmp.size == scan_tmp.size roi = Roi(spint_tmp, mz_tmp, rt_tmp, scan_tmp, mode=mode) return roi
Convert to a ROI object Parameters ---------- rt: array array of retention times associated to each scan valid_scan : array array of scans associated used to build the Rois. start : int first scan used to create ROI mode : mode to pass to ROI creation. Returns -------
https://github.com/griquelme/tidyms/blob/dd8e6f3ea60dea8efca0fb6bac73362b2a2457be/tidyms/lcms.py#L904-L956
import bokeh.plotting import numpy as np import pyopenms from collections import deque from collections import namedtuple from scipy.interpolate import interp1d from scipy.ndimage import gaussian_filter1d from typing import Optional, Iterable, Tuple, Union, List, Callable from . import peaks from . import _plot_bokeh from .utils import find_closest ms_experiment_type = Union[pyopenms.MSExperiment, pyopenms.OnDiscMSExperiment] class MSSpectrum: def __init__(self, mz: np.ndarray, spint: np.ndarray, instrument: Optional[str] = None): self.mz = mz self.spint = spint if instrument is None: instrument = "qtof" self.instrument = instrument @property def instrument(self) -> str: return self._instrument @instrument.setter def instrument(self, value): valid_values = ["qtof", "orbitrap"] if value in valid_values: self._instrument = value else: msg = "instrument must be one of {}".format(valid_values) raise ValueError(msg) def find_centroids(self, min_snr: Optional[float] = None, min_distance: Optional[float] = None ) -> Tuple[np.ndarray, np.ndarray]: params = get_find_centroid_params(self.instrument) if min_distance is not None: params["min_distance"] = min_distance if min_snr is not None: params["min_snr"] = min_snr centroids, area = peaks.find_centroids(self.mz, self.spint, **params) return centroids, area def plot(self, draw: bool = True, fig_params: Optional[dict] = None, line_params: Optional[dict] = None) -> bokeh.plotting.Figure: return _plot_bokeh.plot_ms_spectrum(self.mz, self.spint, draw=draw, fig_params=fig_params, line_params=line_params) class Chromatogram: def __init__(self, rt: np.ndarray, spint: np.ndarray, mode: str = "uplc"): self.mode = mode self.rt = rt self.spint = spint self.peaks = None @property def mode(self): return self._mode @mode.setter def mode(self, value): valid_values = ["uplc", "hplc"] if value in valid_values: self._mode = value else: msg = "mode must be one of {}".format(valid_values) raise ValueError(msg) def find_peaks(self, smoothing_strength: Optional[float] = 1.0, descriptors: Optional[dict] = None, filters: Optional[dict] = None, noise_params: Optional[dict] = None, baseline_params: Optional[dict] = None, find_peaks_params: Optional[dict] = None, return_signal_estimators: bool = False) -> List[dict]: if noise_params is None: noise_params = dict() if baseline_params is None: baseline_params = dict() if find_peaks_params is None: find_peaks_params = dict() if filters is None: filters = get_lc_filter_peak_params(self.mode) noise = peaks.estimate_noise(self.spint, **noise_params) if smoothing_strength is None: x = self.spint else: x = gaussian_filter1d(self.spint, smoothing_strength) baseline = peaks.estimate_baseline(x, noise, **baseline_params) peak_list = peaks.detect_peaks(x, noise, baseline, **find_peaks_params) peak_list, peak_descriptors = peaks.get_peak_descriptors(self.rt, self.spint, noise, baseline, peak_list, descriptors=descriptors, filters=filters) self.peaks = peak_list if return_signal_estimators: estimators = {"smoothed": x, "noise": noise, "baseline": baseline} res = peak_descriptors, estimators else: res = peak_descriptors return res def plot(self, draw: bool = True, fig_params: Optional[dict] = None, line_params: Optional[dict] = None) -> bokeh.plotting.Figure: return _plot_bokeh.plot_chromatogram(self.rt, self.spint, self.peaks, draw=draw, fig_params=fig_params, line_params=line_params) class Roi(Chromatogram): def __init__(self, spint: np.ndarray, mz: np.ndarray, rt: np.ndarray, scan: np.ndarray, mode: str = "uplc"): super(Roi, 
self).__init__(rt, spint, mode=mode) self.mz = mz self.scan = scan def fill_nan(self, fill_value: Optional[float] = None): if np.isnan(self.spint[0]): self.spint[0] = 0 if np.isnan(self.spint[-1]): self.spint[-1] = 0 missing = np.isnan(self.spint) mz_mean = np.nanmean(self.mz) if fill_value is None: interpolator = interp1d(self.rt[~missing], self.spint[~missing]) self.mz[missing] = mz_mean self.spint[missing] = interpolator(self.rt[missing]) else: self.mz[missing] = mz_mean self.spint[missing] = fill_value def get_peaks_mz(self): mz_std = np.zeros(len(self.peaks)) mz_mean = np.zeros(len(self.peaks)) for k, peak in enumerate(self.peaks): mz_std[k] = self.mz[peak.start:peak.end].std() mz_mean[k] = peak.get_loc(self.mz, self.spint) return mz_mean, mz_std def make_tic(ms_experiment: ms_experiment_type, kind: str, mode: str, ms_level: int): if kind == "tic": reduce = np.sum elif kind == "bpi": reduce = np.max else: msg = "valid modes are tic or bpi" raise ValueError(msg) n_scan = ms_experiment.getNrSpectra() rt = np.zeros(n_scan) tic = np.zeros(n_scan) valid_index = list() for k, sp in _get_spectra_iterator(ms_experiment, ms_level, 0, n_scan): valid_index.append(k) rt[k] = sp.getRT() _, spint = sp.get_peaks() tic[k] = reduce(spint) tic = tic[valid_index] rt = rt[valid_index] return Chromatogram(rt, tic, mode) def make_chromatograms(ms_experiment: ms_experiment_type, mz: Iterable[float], window: float = 0.05, start: int = 0, end: Optional[int] = None, accumulator: str = "sum", chromatogram_mode: str = "uplc", ms_level: int = 1 ) -> List[Chromatogram]: nsp = ms_experiment.getNrSpectra() if not isinstance(mz, np.ndarray): mz = np.array(mz) if end is None: end = nsp mz_intervals = (np.vstack((mz - window, mz + window)) .T.reshape(mz.size * 2)) eic = np.zeros((mz.size, end - start)) rt = np.zeros(end - start) valid_index = list() for ksp, sp in _get_spectra_iterator(ms_experiment, ms_level, start, end): valid_index.append(ksp - start) rt[ksp - start] = sp.getRT() mz_sp, int_sp = sp.get_peaks() ind_sp = np.searchsorted(mz_sp, mz_intervals) has_mz = (ind_sp[1::2] - ind_sp[::2]) > 0 ind_sp[ind_sp >= int_sp.size] = int_sp.size - 1 tmp_eic = np.where(has_mz, np.add.reduceat(int_sp, ind_sp)[::2], 0) if accumulator == "mean": norm = ind_sp[1::2] - ind_sp[::2] norm[norm == 0] = 1 tmp_eic = tmp_eic / norm eic[:, ksp - start] = tmp_eic valid_index = np.array(valid_index) rt = rt[valid_index] eic = eic[:, valid_index] chromatograms = list() for row in eic: chromatogram = Chromatogram(rt.copy(), row, mode=chromatogram_mode) chromatograms.append(chromatogram) return chromatograms def make_roi(ms_experiment: ms_experiment_type, tolerance: float, max_missing: int, min_length: int, min_intensity: float, start: int = 0, end: Optional[int] = None, pad: int = 0, multiple_match: str = "reduce", mz_reduce: Union[str, Callable] = None, sp_reduce: Union[str, Callable] = "sum", targeted_mz: Optional[np.ndarray] = None, mode: str = "uplc", ms_level: int = 1 ) -> List[Roi]: if end is None: end = ms_experiment.getNrSpectra() if targeted_mz is None: mz_seed, _ = ms_experiment.getSpectrum(start).get_peaks() targeted = False else: mz_seed = targeted_mz targeted = True size = end - start rt = np.zeros(size) processor = _RoiMaker(mz_seed, max_missing=max_missing, min_length=min_length, min_intensity=min_intensity, tolerance=tolerance, multiple_match=multiple_match, mz_reduce=mz_reduce, sp_reduce=sp_reduce, targeted=targeted) valid_scan = list() for k, sp in _get_spectra_iterator(ms_experiment, ms_level, start, end): rt[k - start] 
= sp.getRT() mz, spint = sp.get_peaks() valid_scan.append(k) processor.extend_roi(mz, spint, k) processor.store_completed_roi() processor.flag_as_completed() processor.store_completed_roi() valid_scan = np.array(valid_scan) roi_list = list() for r in processor.roi: r = _TemporaryRoi(deque(r.mz), deque(r.sp), deque(r.scan)) _pad_roi(r, pad, valid_scan) r = _build_roi(r, rt, valid_scan, start, mode) roi_list.append(r) return roi_list def accumulate_spectra_profile(ms_experiment: ms_experiment_type, start: int, end: int, subtract_left: Optional[int] = None, subtract_right: Optional[int] = None, ms_level: int = 1, instrument: str = "qtof" ) -> MSSpectrum: if subtract_left is None: subtract_left = start if subtract_right is None: subtract_right = end mz, _ = ms_experiment.getSpectrum(start).get_peaks() accumulated_mz = _get_uniform_mz(mz) accumulated_sp = np.zeros_like(accumulated_mz) for scan, sp in _get_spectra_iterator(ms_experiment, ms_level, subtract_left, subtract_right): mz_scan, int_scan = sp.get_peaks() mz_min, mz_max = mz_scan.min(), mz_scan.max() min_ind, max_ind = np.searchsorted(accumulated_mz, [mz_min, mz_max]) interpolator = interp1d(mz_scan, int_scan, kind="linear") tmp_sp = interpolator(accumulated_mz[min_ind:max_ind]) if (scan < start) or (scan > end): accumulated_sp[min_ind:max_ind] -= tmp_sp else: accumulated_sp[min_ind:max_ind] += tmp_sp is_positive_sp = accumulated_sp > 0 accumulated_mz = accumulated_mz[is_positive_sp] accumulated_sp = accumulated_sp[is_positive_sp] return MSSpectrum(accumulated_mz, accumulated_sp, instrument=instrument) def accumulate_spectra_centroid(ms_experiment: ms_experiment_type, start: int, end: int, subtract_left: Optional[int] = None, subtract_right: Optional[int] = None, tolerance: Optional[float] = None, ms_level: int = 1) -> MSSpectrum: if subtract_left is None: subtract_left = start if subtract_right is None: subtract_right = end max_missing = subtract_right - subtract_left roi = make_roi(ms_experiment, tolerance, max_missing=max_missing, min_length=1, min_intensity=0.0, multiple_match="reduce", start=subtract_left, end=subtract_right, mz_reduce=None, sp_reduce="sum", mode="uplc", ms_level=ms_level) mz = np.zeros(len(roi)) spint = mz.copy() for k, r in enumerate(roi): accum_mask = - np.ones(r.scan.size) accum_start, accum_end = np.searchsorted(r.scan, [start, end]) accum_mask[accum_start:accum_end] = 1 mz[k] = np.nanmean(r.mz) spint[k] = np.nansum(r.spint * accum_mask) pos_values = spint > 0 mz = mz[pos_values] spint = spint[pos_values] sorted_index = np.argsort(mz) mz = mz[sorted_index] spint = spint[sorted_index] return MSSpectrum(mz, spint) def get_lc_filter_peak_params(lc_mode: str) -> dict: if lc_mode == "hplc": filters = {"width": (10, 90), "snr": (5, None)} elif lc_mode == "uplc": filters = {"width": (4, 60), "snr": (5, None)} else: msg = "`mode` must be `hplc` or `uplc`" raise ValueError(msg) return filters def get_roi_params(separation: str = "uplc", instrument: str = "qtof"): roi_params = {"min_intensity": 500, "multiple_match": "reduce"} if separation == "uplc": roi_params.update({"max_missing": 1, "min_length": 10, "pad": 2}) elif separation == "hplc": roi_params.update({"max_missing": 1, "min_length": 20, "pad": 2}) else: msg = "valid `separation` are uplc and hplc" raise ValueError(msg) if instrument == "qtof": roi_params.update({"tolerance": 0.01}) elif instrument == "orbitrap": roi_params.update({"tolerance": 0.005}) else: msg = "valid `instrument` are qtof and orbitrap" raise ValueError(msg) roi_params["mode"] = separation 
return roi_params def get_find_centroid_params(instrument: str): params = {"min_snr": 10} if instrument == "qtof": md = 0.01 else: md = 0.005 params["min_distance"] = md return params _TemporaryRoi = namedtuple("TemporaryRoi", ["mz", "sp", "scan"]) def _make_temporary_roi(): return _TemporaryRoi([], [], []) def _append_to__roi(roi: _TemporaryRoi, mz: float, sp: float, scan: int): roi.mz.append(mz) roi.sp.append(sp) roi.scan.append(scan) def _pad_roi(roi: _TemporaryRoi, n: int, valid_scan: np.ndarray): first_scan = roi.scan[0] last_scan = roi.scan[-1] start, end = np.searchsorted(valid_scan, [first_scan, last_scan + 1]) l_pad_index = max(0, start - n) nl = start - l_pad_index r_pad_index = min(valid_scan.size, end + n) nr = r_pad_index - end sp_max = max(roi.sp) sp_min = min(roi.sp) mz_fill = np.mean(roi.mz) sp_threshold = 0.75 * sp_max sp_fill_left = sp_max if (roi.sp[0] > sp_threshold) else sp_min roi.mz.extendleft([mz_fill] * nl) roi.sp.extendleft([sp_fill_left] * nl) roi.scan.extendleft(valid_scan[l_pad_index:start][::-1]) sp_fill_right = sp_max if (roi.sp[-1] > sp_threshold) else sp_min roi.mz.extend([mz_fill] * nr) roi.sp.extend([sp_fill_right] * nr) roi.scan.extend(valid_scan[end:r_pad_index])
BSD 3-Clause New or Revised License
vitruvianscience/opendeep
opendeep/utils/midi/MidiOutFile.py
MidiOutFile.sequencer_specific
python
def sequencer_specific(self, data): self.meta_slice(SEQUENCER_SPECIFIC, data)
data: The data as byte values
https://github.com/vitruvianscience/opendeep/blob/e96efc449101094354b615cf15afe6d03644fc36/opendeep/utils/midi/MidiOutFile.py#L313-L317
from __future__ import absolute_import from .MidiOutStream import MidiOutStream from .RawOutstreamFile import RawOutstreamFile from .constants import * from .DataTypeConverters import fromBytes, writeVar class MidiOutFile(MidiOutStream): def __init__(self, raw_out=''): self.raw_out = RawOutstreamFile(raw_out) MidiOutStream.__init__(self) def write(self): self.raw_out.write() def event_slice(self, slc): trk = self._current_track_buffer trk.writeVarLen(self.rel_time()) trk.writeSlice(slc) def note_on(self, channel=0, note=0x40, velocity=0x40): slc = fromBytes([NOTE_ON + channel, note, velocity]) self.event_slice(slc) def note_off(self, channel=0, note=0x40, velocity=0x40): slc = fromBytes([NOTE_OFF + channel, note, velocity]) self.event_slice(slc) def aftertouch(self, channel=0, note=0x40, velocity=0x40): slc = fromBytes([AFTERTOUCH + channel, note, velocity]) self.event_slice(slc) def continuous_controller(self, channel, controller, value): slc = fromBytes([CONTINUOUS_CONTROLLER + channel, controller, value]) self.event_slice(slc) def patch_change(self, channel, patch): slc = fromBytes([PATCH_CHANGE + channel, patch]) self.event_slice(slc) def channel_pressure(self, channel, pressure): slc = fromBytes([CHANNEL_PRESSURE + channel, pressure]) self.event_slice(slc) def pitch_bend(self, channel, value): msb = (value>>7) & 0xFF lsb = value & 0xFF slc = fromBytes([PITCH_BEND + channel, msb, lsb]) self.event_slice(slc) def system_exclusive(self, data): sysex_len = writeVar(len(data)+1) self.event_slice(chr(SYSTEM_EXCLUSIVE) + sysex_len + data + chr(END_OFF_EXCLUSIVE)) def midi_time_code(self, msg_type, values): value = (msg_type<<4) + values self.event_slice(fromBytes([MIDI_TIME_CODE, value])) def song_position_pointer(self, value): lsb = (value & 0x7F) msb = (value >> 7) & 0x7F self.event_slice(fromBytes([SONG_POSITION_POINTER, lsb, msb])) def song_select(self, songNumber): self.event_slice(fromBytes([SONG_SELECT, songNumber])) def tuning_request(self): self.event_slice(chr(TUNING_REQUEST)) def header(self, format=0, nTracks=1, division=96): raw = self.raw_out raw.writeSlice('MThd') bew = raw.writeBew bew(6, 4) bew(format, 2) bew(nTracks, 2) bew(division, 2) def eof(self): self.write() def meta_slice(self, meta_type, data_slice): slc = fromBytes([META_EVENT, meta_type]) + writeVar(len(data_slice)) + data_slice self.event_slice(slc) def meta_event(self, meta_type, data): self.meta_slice(meta_type, fromBytes(data)) def start_of_track(self, n_track=0): self._current_track_buffer = RawOutstreamFile() self.reset_time() self._current_track += 1 def end_of_track(self): raw = self.raw_out raw.writeSlice(TRACK_HEADER) track_data = self._current_track_buffer.getvalue() eot_slice = writeVar(self.rel_time()) + fromBytes([META_EVENT, END_OF_TRACK, 0]) raw.writeBew(len(track_data)+len(eot_slice), 4) raw.writeSlice(track_data) raw.writeSlice(eot_slice) def sequence_number(self, value): self.meta_slice(meta_type, writeBew(value, 2)) def text(self, text): self.meta_slice(TEXT, text) def copyright(self, text): self.meta_slice(COPYRIGHT, text) def sequence_name(self, text): self.meta_slice(SEQUENCE_NAME, text) def instrument_name(self, text): self.meta_slice(INSTRUMENT_NAME, text) def lyric(self, text): self.meta_slice(LYRIC, text) def marker(self, text): self.meta_slice(MARKER, text) def cuepoint(self, text): self.meta_slice(CUEPOINT, text) def midi_ch_prefix(self, channel): self.meta_slice(MIDI_CH_PREFIX, chr(channel)) def midi_port(self, value): self.meta_slice(MIDI_CH_PREFIX, chr(value)) def tempo(self, value): 
hb, mb, lb = (value>>16 & 0xff), (value>>8 & 0xff), (value & 0xff) self.meta_slice(TEMPO, fromBytes([hb, mb, lb])) def smtp_offset(self, hour, minute, second, frame, framePart): self.meta_slice(SMTP_OFFSET, fromBytes([hour, minute, second, frame, framePart])) def time_signature(self, nn, dd, cc, bb): self.meta_slice(TIME_SIGNATURE, fromBytes([nn, dd, cc, bb])) def key_signature(self, sf, mi): self.meta_slice(KEY_SIGNATURE, fromBytes([sf, mi]))
Apache License 2.0
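A minimal usage sketch for MidiOutFile.sequencer_specific from the record above, assuming the opendeep.utils.midi package is importable as in the context; the output path and byte payload are invented, and the byte-string argument follows the module's Python 2 string-as-bytes convention.

from opendeep.utils.midi.MidiOutFile import MidiOutFile

out = MidiOutFile('sequencer_demo.mid')   # hypothetical output file
out.header(format=0, nTracks=1, division=96)
out.start_of_track()
# wrapped by meta_slice() as a META_EVENT / SEQUENCER_SPECIFIC slice
out.sequencer_specific('\x7d\x01\x02')    # manufacturer id 0x7d plus two data bytes
out.end_of_track()
out.eof()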
inmanta/inmanta-core
src/inmanta/parser/plyInmantaParser.py
p_entity_body_outer_1
python
def p_entity_body_outer_1(p: YaccProduction) -> None: p[0] = (None, p[1])
entity_body_outer : entity_body END
https://github.com/inmanta/inmanta-core/blob/7e57295314e30276204b74ddcb8e2402c0a50b19/src/inmanta/parser/plyInmantaParser.py#L257-L259
import logging import re from typing import List, Optional, Union import ply.yacc as yacc from ply.yacc import YaccProduction import inmanta.warnings as inmanta_warnings from inmanta.ast import LocatableString, Location, Namespace, Range from inmanta.ast.blocks import BasicBlock from inmanta.ast.constraint.expression import IsDefined, Not, Operator from inmanta.ast.statements import Literal, Statement from inmanta.ast.statements.assign import CreateDict, CreateList, IndexLookup, MapLookup, ShortIndexLookup, StringFormat from inmanta.ast.statements.call import FunctionCall from inmanta.ast.statements.define import ( DefineAttribute, DefineEntity, DefineImplement, DefineImplementation, DefineImport, DefineIndex, DefineRelation, DefineTypeConstraint, DefineTypeDefault, TypeDeclaration, ) from inmanta.ast.statements.generator import ConditionalExpression, Constructor, For, If, WrappedKwargs from inmanta.ast.variables import AttributeReference, Reference from inmanta.execute.util import NoneValue from inmanta.parser import ParserException, SyntaxDeprecationWarning, plyInmantaLex from inmanta.parser.cache import CacheManager from inmanta.parser.plyInmantaLex import reserved, tokens LOGGER = logging.getLogger() file = "NOFILE" namespace = None precedence = ( ("right", ","), ("nonassoc", ":"), ("nonassoc", "?"), ("left", "OR"), ("left", "AND"), ("left", "CMP_OP"), ("nonassoc", "NOT"), ("left", "IN"), ("right", "MLS"), ("right", "MLS_END"), ) def attach_lnr(p: YaccProduction, token: int = 1) -> None: v = p[0] v.location = Location(file, p.lineno(token)) v.namespace = namespace v.lexpos = p.lexpos(token) def merge_lnr_to_string(p: YaccProduction, starttoken: int = 1, endtoken: int = 2) -> None: v = p[0] et = p[endtoken] endline = et.elnr endchar = et.end st = p[starttoken] if isinstance(st, LocatableString): startline = st.lnr startchar = st.start else: startline = et.lnr startchar = et.start p[0] = LocatableString(v, Range(file, startline, startchar, endline, endchar), endchar, namespace) def attach_from_string(p: YaccProduction, token: int = 1) -> None: v = p[0] v.location = p[token].location v.namespace = p[token].namespace def make_none(p: YaccProduction, token: int) -> Literal: none = Literal(NoneValue()) none.location = Location(file, p.lineno(token)) none.namespace = namespace none.lexpos = p.lexpos(token) return none def p_main_collect(p: YaccProduction) -> None: v = p[2] v.insert(0, p[1]) p[0] = v def p_main_term(p: YaccProduction) -> None: p[0] = [] def p_top_stmt(p: YaccProduction) -> None: p[0] = p[1] def p_empty(p: YaccProduction) -> None: pass def p_import(p: YaccProduction) -> None: p[0] = DefineImport(str(p[2]), str(p[2])) attach_lnr(p, 1) def p_import_1(p: YaccProduction) -> None: p[0] = DefineImport(str(p[2]), p[4]) attach_lnr(p, 1) def p_stmt(p: YaccProduction) -> None: p[0] = p[1] def p_stmt_list_collect(p: YaccProduction) -> None: v = p[2] v.append(p[1]) p[0] = v def p_stmt_list_empty(p: YaccProduction) -> None: p[0] = [] def p_assign(p: YaccProduction) -> None: p[0] = p[1].as_assign(p[3]) attach_lnr(p, 2) def p_assign_extend(p: YaccProduction) -> None: p[0] = p[1].as_assign(p[3], list_only=True) attach_lnr(p, 2) def p_for(p: YaccProduction) -> None: p[0] = For(p[4], p[2], BasicBlock(namespace, p[6])) attach_lnr(p, 1) def p_if(p: YaccProduction) -> None: p[0] = If(p[2], BasicBlock(namespace, p[4]), BasicBlock(namespace, [])) attach_lnr(p, 1) def p_if_else(p: YaccProduction) -> None: p[0] = If(p[2], BasicBlock(namespace, p[4]), BasicBlock(namespace, p[7])) attach_lnr(p, 1) def 
p_entity(p: YaccProduction) -> None: p[0] = DefineEntity(namespace, p[2], p[4][0], [], p[4][1]) attach_lnr(p) def p_entity_err_1(p: YaccProduction) -> None: raise ParserException(p[2].location, str(p[2]), "Invalid identifier: Entity names must start with a capital") def p_entity_extends(p: YaccProduction) -> None: p[0] = DefineEntity(namespace, p[2], p[6][0], p[4], p[6][1]) attach_lnr(p) def p_entity_extends_err(p: YaccProduction) -> None: raise ParserException(p[2].location, str(p[2]), "Invalid identifier: Entity names must start with a capital") def p_entity_body_outer(p: YaccProduction) -> None: p[0] = (p[1], p[2])
Apache License 2.0
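In the record above the docstring of p_entity_body_outer_1 is not documentation: ply.yacc reads it as the grammar production entity_body_outer : entity_body END, and the body maps symbol values to the rule result. A dependency-free stand-in below mimics that mapping with a plain list in place of the YaccProduction, so the shape of the returned tuple is visible; the fake production values are invented, and reading the tuple as (docstring, attributes) follows the p_entity rules shown in the context.

def p_entity_body_outer_1(p):
    """entity_body_outer : entity_body END"""
    # p[0] is the rule's value; p[1] is the entity_body symbol, p[2] the END token
    p[0] = (None, p[1])

# ply would pass a YaccProduction; a plain list stands in for it here
p = [None, ["attribute_a", "attribute_b"], "end"]
p_entity_body_outer_1(p)
print(p[0])   # (None, ['attribute_a', 'attribute_b']) -- no docstring, just the body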
openstack/cinder
cinder/backup/chunkeddriver.py
ChunkedBackupDriver._calculate_sha
python
def _calculate_sha(self, data): chunk = memoryview(data) shalist = [] off = 0 datalen = len(chunk) while off < datalen: chunk_end = min(datalen, off + self.sha_block_size_bytes) block = chunk[off:chunk_end] sha = hashlib.sha256(block).hexdigest() shalist.append(sha) off += self.sha_block_size_bytes return shalist
Calculate SHA256 of a data chunk. This method cannot log anything as it is called on a native thread.
https://github.com/openstack/cinder/blob/4558e4b53a7e41dc1263417a4824f39bb6fd30e1/cinder/backup/chunkeddriver.py#L484-L501
import abc import hashlib import json import os import sys import eventlet from oslo_config import cfg from oslo_log import log as logging from oslo_service import loopingcall from oslo_utils import excutils from oslo_utils import secretutils from oslo_utils import units from cinder.backup import driver from cinder import exception from cinder.i18n import _ from cinder import objects from cinder.objects import fields from cinder.volume import volume_utils if sys.platform == 'win32': from os_win import utilsfactory as os_win_utilsfactory LOG = logging.getLogger(__name__) backup_opts = [ cfg.StrOpt('backup_compression_algorithm', default='zlib', ignore_case=True, choices=[('none', 'Do not use compression'), ('off', "Same as 'none'"), ('no', "Same as 'none'"), ('zlib', 'Use the Deflate compression algorithm'), ('gzip', "Same as 'zlib'"), ('bz2', 'Use Burrows-Wheeler transform compression'), ('bzip2', "Same as 'bz2'"), ('zstd', 'Use the Zstandard compression algorithm')], help="Compression algorithm for backups ('none' to disable)"), ] CONF = cfg.CONF CONF.register_opts(backup_opts) class ChunkedBackupDriver(driver.BackupDriver, metaclass=abc.ABCMeta): DRIVER_VERSION = '1.0.0' DRIVER_VERSION_MAPPING = {'1.0.0': '_restore_v1'} def _get_compressor(self, algorithm): try: if algorithm.lower() in ('none', 'off', 'no'): return None if algorithm.lower() in ('zlib', 'gzip'): import zlib as compressor result = compressor elif algorithm.lower() in ('bz2', 'bzip2'): import bz2 as compressor result = compressor elif algorithm.lower() == 'zstd': import zstd as compressor result = compressor else: result = None if result: return eventlet.tpool.Proxy(result) except ImportError: pass err = _('unsupported compression algorithm: %s') % algorithm raise ValueError(err) def __init__( self, context, chunk_size_bytes, sha_block_size_bytes, backup_default_container, enable_progress_timer, ): super(ChunkedBackupDriver, self).__init__(context) self.chunk_size_bytes = chunk_size_bytes self.sha_block_size_bytes = sha_block_size_bytes self.backup_default_container = backup_default_container self.enable_progress_timer = enable_progress_timer self.backup_timer_interval = CONF.backup_timer_interval self.data_block_num = CONF.backup_object_number_per_notification self.az = CONF.storage_availability_zone self.backup_compression_algorithm = CONF.backup_compression_algorithm self.compressor = self._get_compressor(CONF.backup_compression_algorithm) self.support_force_delete = True if sys.platform == 'win32' and self.chunk_size_bytes % 4096: err = _("Invalid chunk size. 
It must be a multiple of 4096.") raise exception.InvalidConfigurationValue(message=err) def _get_object_writer(self, container, object_name, extra_metadata=None): writer = self.get_object_writer(container, object_name, extra_metadata) return eventlet.tpool.Proxy(writer) def _get_object_reader(self, container, object_name, extra_metadata=None): reader = self.get_object_reader(container, object_name, extra_metadata) return eventlet.tpool.Proxy(reader) @abc.abstractmethod def put_container(self, container): return @abc.abstractmethod def get_container_entries(self, container, prefix): return @abc.abstractmethod def get_object_writer(self, container, object_name, extra_metadata=None): return @abc.abstractmethod def get_object_reader(self, container, object_name, extra_metadata=None): return @abc.abstractmethod def delete_object(self, container, object_name): return @abc.abstractmethod def _generate_object_name_prefix(self, backup): return @abc.abstractmethod def update_container_name(self, backup, container): return @abc.abstractmethod def get_extra_metadata(self, backup, volume): return def _create_container(self, backup): new_container = self.update_container_name(backup, backup.container) if new_container: if new_container != backup.container: backup.container = new_container elif backup.container is None: backup.container = self.backup_default_container LOG.debug('_create_container started, container: %(container)s,' 'backup: %(backup_id)s.', {'container': backup.container, 'backup_id': backup.id}) backup.save() self.put_container(backup.container) return backup.container def _generate_object_names(self, backup): prefix = backup['service_metadata'] object_names = self.get_container_entries(backup['container'], prefix) LOG.debug('generated object list: %s.', object_names) return object_names def _metadata_filename(self, backup): object_name = backup['service_metadata'] filename = '%s_metadata' % object_name return filename def _sha256_filename(self, backup): object_name = backup['service_metadata'] filename = '%s_sha256file' % object_name return filename def _write_metadata(self, backup, volume_id, container, object_list, volume_meta, extra_metadata=None): filename = self._metadata_filename(backup) LOG.debug('_write_metadata started, container name: %(container)s,' ' metadata filename: %(filename)s.', {'container': container, 'filename': filename}) metadata = {} metadata['version'] = self.DRIVER_VERSION metadata['backup_id'] = backup['id'] metadata['volume_id'] = volume_id metadata['backup_name'] = backup['display_name'] metadata['backup_description'] = backup['display_description'] metadata['created_at'] = str(backup['created_at']) metadata['objects'] = object_list metadata['parent_id'] = backup['parent_id'] metadata['volume_meta'] = volume_meta if extra_metadata: metadata['extra_metadata'] = extra_metadata metadata_json = json.dumps(metadata, sort_keys=True, indent=2) metadata_json = metadata_json.encode('utf-8') with self._get_object_writer(container, filename) as writer: writer.write(metadata_json) LOG.debug('_write_metadata finished. 
Metadata: %s.', metadata_json) def _write_sha256file(self, backup, volume_id, container, sha256_list): filename = self._sha256_filename(backup) LOG.debug('_write_sha256file started, container name: %(container)s,' ' sha256file filename: %(filename)s.', {'container': container, 'filename': filename}) sha256file = {} sha256file['version'] = self.DRIVER_VERSION sha256file['backup_id'] = backup['id'] sha256file['volume_id'] = volume_id sha256file['backup_name'] = backup['display_name'] sha256file['backup_description'] = backup['display_description'] sha256file['created_at'] = str(backup['created_at']) sha256file['chunk_size'] = self.sha_block_size_bytes sha256file['sha256s'] = sha256_list sha256file_json = json.dumps(sha256file, sort_keys=True, indent=2) sha256file_json = sha256file_json.encode('utf-8') with self._get_object_writer(container, filename) as writer: writer.write(sha256file_json) LOG.debug('_write_sha256file finished.') def _read_metadata(self, backup): container = backup['container'] filename = self._metadata_filename(backup) LOG.debug('_read_metadata started, container name: %(container)s, ' 'metadata filename: %(filename)s.', {'container': container, 'filename': filename}) with self._get_object_reader(container, filename) as reader: metadata_json = reader.read() metadata_json = metadata_json.decode('utf-8') metadata = json.loads(metadata_json) LOG.debug('_read_metadata finished. Metadata: %s.', metadata_json) return metadata def _read_sha256file(self, backup): container = backup['container'] filename = self._sha256_filename(backup) LOG.debug('_read_sha256file started, container name: %(container)s, ' 'sha256 filename: %(filename)s.', {'container': container, 'filename': filename}) with self._get_object_reader(container, filename) as reader: sha256file_json = reader.read() sha256file_json = sha256file_json.decode('utf-8') sha256file = json.loads(sha256file_json) LOG.debug('_read_sha256file finished.') return sha256file def _prepare_backup(self, backup): volume = self.db.volume_get(self.context, backup.volume_id) if volume['size'] <= 0: err = _('volume size %d is invalid.') % volume['size'] raise exception.InvalidVolume(reason=err) container = self._create_container(backup) object_prefix = self._generate_object_name_prefix(backup) backup.service_metadata = object_prefix backup.save() volume_size_bytes = volume['size'] * units.Gi availability_zone = self.az LOG.debug('starting backup of volume: %(volume_id)s,' ' volume size: %(volume_size_bytes)d, object names' ' prefix %(object_prefix)s, availability zone:' ' %(availability_zone)s', { 'volume_id': backup.volume_id, 'volume_size_bytes': volume_size_bytes, 'object_prefix': object_prefix, 'availability_zone': availability_zone, }) object_meta = {'id': 1, 'list': [], 'prefix': object_prefix, 'volume_meta': None} object_sha256 = {'id': 1, 'sha256s': [], 'prefix': object_prefix} extra_metadata = self.get_extra_metadata(backup, volume) if extra_metadata is not None: object_meta['extra_metadata'] = extra_metadata return (object_meta, object_sha256, extra_metadata, container, volume_size_bytes) def _backup_chunk(self, backup, container, data, data_offset, object_meta, extra_metadata): object_prefix = object_meta['prefix'] object_list = object_meta['list'] object_id = object_meta['id'] object_name = '%s-%05d' % (object_prefix, object_id) obj = {} obj[object_name] = {} obj[object_name]['offset'] = data_offset obj[object_name]['length'] = len(data) LOG.debug('Backing up chunk of data from volume.') algorithm, output_data = 
self._prepare_output_data(data) obj[object_name]['compression'] = algorithm LOG.debug('About to put_object') with self._get_object_writer( container, object_name, extra_metadata=extra_metadata ) as writer: writer.write(output_data) md5 = eventlet.tpool.execute( secretutils.md5, data, usedforsecurity=False).hexdigest() obj[object_name]['md5'] = md5 LOG.debug('backup MD5 for %(object_name)s: %(md5)s', {'object_name': object_name, 'md5': md5}) object_list.append(obj) object_id += 1 object_meta['list'] = object_list object_meta['id'] = object_id LOG.debug('Calling eventlet.sleep(0)') eventlet.sleep(0) def _prepare_output_data(self, data): if self.compressor is None: return 'none', data data_size_bytes = len(data) compressed_data = self.compressor.compress(data) comp_size_bytes = len(compressed_data) algorithm = CONF.backup_compression_algorithm.lower() if comp_size_bytes >= data_size_bytes: LOG.debug('Compression of this chunk was ineffective: ' 'original length: %(data_size_bytes)d, ' 'compressed length: %(compressed_size_bytes)d. ' 'Using original data for this chunk.', {'data_size_bytes': data_size_bytes, 'compressed_size_bytes': comp_size_bytes, }) return 'none', data LOG.debug('Compressed %(data_size_bytes)d bytes of data ' 'to %(comp_size_bytes)d bytes using %(algorithm)s.', {'data_size_bytes': data_size_bytes, 'comp_size_bytes': comp_size_bytes, 'algorithm': algorithm, }) return algorithm, compressed_data def _finalize_backup(self, backup, container, object_meta, object_sha256): object_list = object_meta['list'] object_id = object_meta['id'] volume_meta = object_meta['volume_meta'] sha256_list = object_sha256['sha256s'] extra_metadata = object_meta.get('extra_metadata') self._write_sha256file(backup, backup.volume_id, container, sha256_list) self._write_metadata(backup, backup.volume_id, container, object_list, volume_meta, extra_metadata) backup.object_count = object_id - 1 backup.save() LOG.debug('backup %s finished.', backup['id']) def _backup_metadata(self, backup, object_meta): json_meta = self.get_metadata(backup['volume_id']) if not json_meta: LOG.debug("No volume metadata to backup.") return object_meta["volume_meta"] = json_meta def _send_progress_end(self, context, backup, object_meta): object_meta['backup_percent'] = 100 volume_utils.notify_about_backup_usage(context, backup, "createprogress", extra_usage_info= object_meta) def _send_progress_notification(self, context, backup, object_meta, total_block_sent_num, total_volume_size): backup_percent = total_block_sent_num * 100 / total_volume_size object_meta['backup_percent'] = backup_percent volume_utils.notify_about_backup_usage(context, backup, "createprogress", extra_usage_info= object_meta) def _get_win32_phys_disk_size(self, disk_path): win32_diskutils = os_win_utilsfactory.get_diskutils() disk_number = win32_diskutils.get_device_number_from_device_name( disk_path) return win32_diskutils.get_disk_size(disk_number)
Apache License 2.0
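A self-contained sketch of the block-wise hashing that ChunkedBackupDriver._calculate_sha performs above: the payload is walked in sha_block_size_bytes strides and each block is hashed separately, which is what later lets incremental backups compare per-block digests. Block size and sample bytes are made up.

import hashlib

def calculate_sha(data, sha_block_size_bytes):
    # hash the payload one fixed-size block at a time, like the driver does
    chunk = memoryview(data)
    shalist = []
    for off in range(0, len(chunk), sha_block_size_bytes):
        block = chunk[off:off + sha_block_size_bytes]
        shalist.append(hashlib.sha256(block).hexdigest())
    return shalist

digests = calculate_sha(b"a" * 10 + b"b" * 6, sha_block_size_bytes=8)
print(len(digests))   # 2 blocks: b'aaaaaaaa' and b'aabbbbbb'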
autonomousvision/convolutional_occupancy_networks
src/common.py
get_nearest_neighbors_indices_batch
python
def get_nearest_neighbors_indices_batch(points_src, points_tgt, k=1): indices = [] distances = [] for (p1, p2) in zip(points_src, points_tgt): kdtree = KDTree(p2) dist, idx = kdtree.query(p1, k=k) indices.append(idx) distances.append(dist) return indices, distances
Returns the nearest neighbors for point sets batchwise. Args: points_src (numpy array): source points points_tgt (numpy array): target points k (int): number of nearest neighbors to return
https://github.com/autonomousvision/convolutional_occupancy_networks/blob/f44d413f8d455657a44c24d06163934c69141a09/src/common.py#L125-L142
import torch from src.utils.libkdtree import KDTree import numpy as np import math def compute_iou(occ1, occ2): occ1 = np.asarray(occ1) occ2 = np.asarray(occ2) if occ1.ndim >= 2: occ1 = occ1.reshape(occ1.shape[0], -1) if occ2.ndim >= 2: occ2 = occ2.reshape(occ2.shape[0], -1) occ1 = (occ1 >= 0.5) occ2 = (occ2 >= 0.5) area_union = (occ1 | occ2).astype(np.float32).sum(axis=-1) area_intersect = (occ1 & occ2).astype(np.float32).sum(axis=-1) iou = (area_intersect / area_union) return iou def chamfer_distance(points1, points2, use_kdtree=True, give_id=False): if use_kdtree: return chamfer_distance_kdtree(points1, points2, give_id=give_id) else: return chamfer_distance_naive(points1, points2) def chamfer_distance_naive(points1, points2): assert(points1.size() == points2.size()) batch_size, T, _ = points1.size() points1 = points1.view(batch_size, T, 1, 3) points2 = points2.view(batch_size, 1, T, 3) distances = (points1 - points2).pow(2).sum(-1) chamfer1 = distances.min(dim=1)[0].mean(dim=1) chamfer2 = distances.min(dim=2)[0].mean(dim=1) chamfer = chamfer1 + chamfer2 return chamfer def chamfer_distance_kdtree(points1, points2, give_id=False): batch_size = points1.size(0) points1_np = points1.detach().cpu().numpy() points2_np = points2.detach().cpu().numpy() idx_nn_12, _ = get_nearest_neighbors_indices_batch(points1_np, points2_np) idx_nn_12 = torch.LongTensor(idx_nn_12).to(points1.device) idx_nn_12_expand = idx_nn_12.view(batch_size, -1, 1).expand_as(points1) idx_nn_21, _ = get_nearest_neighbors_indices_batch(points2_np, points1_np) idx_nn_21 = torch.LongTensor(idx_nn_21).to(points1.device) idx_nn_21_expand = idx_nn_21.view(batch_size, -1, 1).expand_as(points2) points_12 = torch.gather(points2, dim=1, index=idx_nn_12_expand) points_21 = torch.gather(points1, dim=1, index=idx_nn_21_expand) chamfer1 = (points1 - points_12).pow(2).sum(2).mean(1) chamfer2 = (points2 - points_21).pow(2).sum(2).mean(1) chamfer = chamfer1 + chamfer2 if give_id: return chamfer1, chamfer2, idx_nn_12, idx_nn_21 return chamfer
MIT License
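A hedged usage sketch of the batched nearest-neighbour lookup above. It swaps the repository's bundled src.utils.libkdtree KDTree for scipy.spatial.cKDTree, whose query() also returns (distances, indices), so the per-batch loop stays the same; the random point clouds are purely illustrative.

import numpy as np
from scipy.spatial import cKDTree

def get_nearest_neighbors_indices_batch(points_src, points_tgt, k=1):
    indices, distances = [], []
    for p1, p2 in zip(points_src, points_tgt):
        dist, idx = cKDTree(p2).query(p1, k=k)   # nearest target point for every source point
        indices.append(idx)
        distances.append(dist)
    return indices, distances

rng = np.random.default_rng(0)
src = rng.random((2, 5, 3))   # batch of 2 clouds, 5 source points each
tgt = rng.random((2, 7, 3))   # batch of 2 clouds, 7 target points each
idx, dist = get_nearest_neighbors_indices_batch(src, tgt)
print(idx[0].shape, dist[0].shape)   # (5,) (5,)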
tongchangd/text_data_enhancement_with_lasertagger
transformer_decoder.py
DecoderStack.call
python
def call(self, decoder_inputs, encoder_outputs, decoder_self_attention_bias, attention_bias=None, cache=None): for n, layer in enumerate(self.layers): self_attention_layer = layer[0] feed_forward_network = layer[1] proj_layer = layer[2] decoder_inputs = tf.concat([decoder_inputs, encoder_outputs], axis=-1) decoder_inputs = proj_layer(decoder_inputs) layer_name = "layer_%d" % n layer_cache = cache[layer_name] if cache is not None else None with tf.variable_scope(layer_name): with tf.variable_scope("self_attention"): decoder_inputs = self_attention_layer( decoder_inputs, decoder_self_attention_bias, cache=layer_cache) with tf.variable_scope("ffn"): decoder_inputs = feed_forward_network(decoder_inputs) return self.output_normalization(decoder_inputs)
Returns the output of the decoder layer stacks. Args: decoder_inputs: tensor with shape [batch_size, target_length, hidden_size] encoder_outputs: tensor with shape [batch_size, input_length, hidden_size] decoder_self_attention_bias: bias for decoder self-attention layer. [1, 1, target_len, target_length] attention_bias: bias for encoder-decoder attention layer. [batch_size, 1, 1, input_length] cache: (Used for fast decoding) A nested dictionary storing previous decoder self-attention values. The items are: {layer_n: {"k": tensor with shape [batch_size, i, key_channels], "v": tensor with shape [batch_size, i, value_channels]}, ...} Returns: Output of decoder layer stack. float32 tensor with shape [batch_size, target_length, hidden_size]
https://github.com/tongchangd/text_data_enhancement_with_lasertagger/blob/b8286196e2f0e1decf73da79c665f25bf8a0ff45/transformer_decoder.py#L173-L212
from __future__ import absolute_import from __future__ import division from __future__ import print_function from typing import Any, Mapping, Text import tensorflow as tf from official_transformer import attention_layer from official_transformer import embedding_layer from official_transformer import ffn_layer from official_transformer import model_utils from official_transformer import transformer class TransformerDecoder(transformer.Transformer): def __init__(self, params, train): self.train = train self.params = params self.embedding_softmax_layer = embedding_layer.EmbeddingSharedWeights( params["vocab_size"], params["hidden_size"], method="matmul" if params["use_tpu"] else "gather") if self.params["use_full_attention"]: self.decoder_stack = transformer.DecoderStack(params, train) else: self.decoder_stack = DecoderStack(params, train) def __call__(self, inputs, encoder_outputs, targets=None): initializer = tf.variance_scaling_initializer( self.params["initializer_gain"], mode="fan_avg", distribution="uniform") with tf.variable_scope("Transformer", initializer=initializer): attention_bias = model_utils.get_padding_bias(inputs) if targets is None: return self.predict(encoder_outputs, attention_bias) else: logits = self.decode(targets, encoder_outputs, attention_bias) return logits def _get_symbols_to_logits_fn(self, max_decode_length): timing_signal = model_utils.get_position_encoding( max_decode_length + 1, self.params["hidden_size"]) decoder_self_attention_bias = model_utils.get_decoder_self_attention_bias( max_decode_length) def symbols_to_logits_fn(ids, i, cache): decoder_input = ids[:, -1:] decoder_input = self.embedding_softmax_layer(decoder_input) decoder_input += timing_signal[i:i + 1] self_attention_bias = decoder_self_attention_bias[:, :, i:i + 1, :i + 1] if self.params["use_full_attention"]: encoder_outputs = cache.get("encoder_outputs") else: encoder_outputs = cache.get("encoder_outputs")[:, i:i+1] decoder_outputs = self.decoder_stack( decoder_input, encoder_outputs, self_attention_bias, cache.get("encoder_decoder_attention_bias"), cache) logits = self.embedding_softmax_layer.linear(decoder_outputs) logits = tf.squeeze(logits, axis=[1]) return logits, cache return symbols_to_logits_fn class DecoderStack(tf.layers.Layer): def __init__(self, params, train): super(DecoderStack, self).__init__() self.layers = [] for _ in range(params["num_hidden_layers"]): self_attention_layer = attention_layer.SelfAttention( params["hidden_size"], params["num_heads"], params["attention_dropout"], train) feed_forward_network = ffn_layer.FeedFowardNetwork( params["hidden_size"], params["filter_size"], params["relu_dropout"], train, params["allow_ffn_pad"]) proj_layer = tf.layers.Dense( params["hidden_size"], use_bias=True, name="proj_layer") self.layers.append([ transformer.PrePostProcessingWrapper( self_attention_layer, params, train), transformer.PrePostProcessingWrapper( feed_forward_network, params, train), proj_layer]) self.output_normalization = transformer.LayerNormalization( params["hidden_size"])
Apache License 2.0
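The distinctive step in DecoderStack.call above is that, instead of full encoder-decoder attention, each layer concatenates the decoder input with the aligned encoder output along the feature axis and projects back to hidden_size. A small numpy sketch of just that fusion step; shapes are invented and the real model uses tf.layers.Dense for the projection.

import numpy as np

batch, length, hidden = 2, 6, 8
decoder_inputs = np.random.randn(batch, length, hidden)
encoder_outputs = np.random.randn(batch, length, hidden)

# concatenate along the feature axis: (batch, length, 2 * hidden)
fused = np.concatenate([decoder_inputs, encoder_outputs], axis=-1)

# project back to hidden_size, mirroring proj_layer in the DecoderStack
w = np.random.randn(2 * hidden, hidden)
b = np.zeros(hidden)
projected = fused @ w + b
print(projected.shape)   # (2, 6, 8), the shape the self-attention layer expects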
googleads/google-ads-python
google/ads/googleads/v7/services/services/ad_group_audience_view_service/client.py
AdGroupAudienceViewServiceClientMeta.get_transport_class
python
def get_transport_class( cls, label: str = None, ) -> Type[AdGroupAudienceViewServiceTransport]: if label: return cls._transport_registry[label] return next(iter(cls._transport_registry.values()))
Return an appropriate transport class. Args: label: The name of the desired transport. If none is provided, then the first transport in the registry is used. Returns: The transport class to use.
https://github.com/googleads/google-ads-python/blob/6794993e146abcfe21292677144c66cb546446bc/google/ads/googleads/v7/services/services/ad_group_audience_view_service/client.py#L56-L74
from collections import OrderedDict from distutils import util import os import re from typing import Dict, Optional, Sequence, Tuple, Type, Union from google.api_core import client_options as client_options_lib from google.api_core import exceptions from google.api_core import gapic_v1 from google.api_core import retry as retries from google.auth import credentials from google.auth.transport import mtls from google.auth.transport.grpc import SslCredentials from google.auth.exceptions import MutualTLSChannelError from google.oauth2 import service_account from google.ads.googleads.v7.resources.types import ad_group_audience_view from google.ads.googleads.v7.services.types import ( ad_group_audience_view_service, ) from .transports.base import ( AdGroupAudienceViewServiceTransport, DEFAULT_CLIENT_INFO, ) from .transports.grpc import AdGroupAudienceViewServiceGrpcTransport class AdGroupAudienceViewServiceClientMeta(type): _transport_registry = ( OrderedDict() ) _transport_registry["grpc"] = AdGroupAudienceViewServiceGrpcTransport
Apache License 2.0
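The metaclass method above is a small registry lookup: each transport label maps to a transport class in an OrderedDict, and when no label is given the first registered transport wins. A stripped-down sketch of that pattern with placeholder transport classes and no google-ads dependency:

from collections import OrderedDict

class GrpcTransport: ...
class RestTransport: ...

class ClientMeta(type):
    _transport_registry = OrderedDict()
    _transport_registry["grpc"] = GrpcTransport
    _transport_registry["rest"] = RestTransport

    def get_transport_class(cls, label=None):
        if label:
            return cls._transport_registry[label]
        # no label: fall back to the first transport that was registered
        return next(iter(cls._transport_registry.values()))

class Client(metaclass=ClientMeta): ...

print(Client.get_transport_class("rest"))   # RestTransport
print(Client.get_transport_class())         # GrpcTransport, the default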
ibm/superglue-mtl
scripts/rewritting_utils.py
ConstituencyRule.gen_neg_output
python
def gen_neg_output(self, fmt_args, ans, entity_mentions): neg_output = [] ans_type = None for ent, ne_type in entity_mentions: if ans == ent: ans_type = ne_type break if ans_type is None: return [] possible_alternatives = [] for ent, ne_type in entity_mentions: if ne_type != ans_type:continue if ent.lower() in PRON_LIST: continue if ent in possible_alternatives:continue if ent == ans: continue if ent in fmt_args:continue possible_alternatives.append(ent) if len(possible_alternatives) == 0: return [] pattern_toks = self.out_pattern.split() for i in range(len(pattern_toks)): cur_tok = pattern_toks[i] if cur_tok in ["at", "in", "on"]: next_p = fmt_args[int(pattern_toks[i + 1][1:-1])] next_p_toks = next_p.split() if cur_tok in next_p_toks: next_p_toks.remove(cur_tok) fmt_args[int(pattern_toks[i + 1][1:-1])] = " ".join(next_p_toks) for alt in possible_alternatives: cur_formatted_output = self.out_pattern.format(*fmt_args).replace(ans, alt) neg_output.append(cur_formatted_output) return neg_output
Switch the named entity (NE) in ans with another entity of the same type.
https://github.com/ibm/superglue-mtl/blob/1eb3e581c0ef3b4c261e0256ec26116d2b657c40/scripts/rewritting_utils.py#L216-L259
from nltk.corpus import wordnet as wn import corenlp_utils as corenlp import sys MODULE = "../resources/pattern-2.6/" sys.path.append(MODULE) from pattern import en as patten POS_TO_WORDNET = { 'NN': wn.NOUN, 'JJ': wn.ADJ, 'JJR': wn.ADJ, 'JJS': wn.ADJ, } POS_TO_PATTERN = { 'vb': 'inf', 'vbp': '1sg', 'vbz': '3sg', 'vbg': 'part', 'vbd': 'p', 'vbn': 'ppart', } PATTERN_TENSES = ['inf', '3sg', 'p', 'part', 'ppart', '1sg'] PRON_LIST = ["she", "he", "her", "him", "his", "you", "we", "it", "our"] def read_const_parse(parse_str): tree = corenlp.ConstituencyParse.from_corenlp(parse_str) new_tree = compress_whnp(tree) return new_tree def fix_style(s): s = s.replace('?', '') s = s.strip(' .') if s[0] == s[0].lower(): s = s[0].upper() + s[1:] return s + '.' CONST_PARSE_MACROS = { '$Noun': '$NP/$NN/$NNS/$NNP/$NNPS', '$Verb': '$VB/$VBD/$VBP/$VBZ', '$Part': '$VBN/$VG', '$Be': 'is/are/was/were', '$Do': "do/did/does/don't/didn't/doesn't", '$WHP': '$WHADJP/$WHADVP/$WHNP/$WHPP', } def _check_match(node, pattern_tok): if pattern_tok in CONST_PARSE_MACROS: pattern_tok = CONST_PARSE_MACROS[pattern_tok] if ':' in pattern_tok: lhs, rhs = pattern_tok.split(':') match_lhs = _check_match(node, lhs) if not match_lhs: return False phrase = node.get_phrase().lower() retval = any(phrase.startswith(w) for w in rhs.split('/')) return retval elif '/' in pattern_tok: return any(_check_match(node, t) for t in pattern_tok.split('/')) return ((pattern_tok.startswith('$') and pattern_tok[1:] == node.tag) or (node.word and pattern_tok.lower() == node.word.lower())) def _recursive_match_pattern(pattern_toks, stack, matches): if len(matches) == len(pattern_toks): return len(stack) == 0 if len(stack) == 0: return False cur_tok = pattern_toks[len(matches)] node = stack.pop() is_match = _check_match(node, cur_tok) if is_match: cur_num_matches = len(matches) matches.append(node) new_stack = list(stack) success = _recursive_match_pattern(pattern_toks, new_stack, matches) if success: return True while len(matches) > cur_num_matches: matches.pop() if not node.children: return False stack.extend(node.children[::-1]) return _recursive_match_pattern(pattern_toks, stack, matches) def match_pattern(pattern, const_parse): pattern_toks = pattern.split(' ') whole_phrase = const_parse.get_phrase() if whole_phrase.endswith('?') or whole_phrase.endswith('.'): pattern_toks.append(whole_phrase[-1]) matches = [] success = _recursive_match_pattern(pattern_toks, [const_parse], matches) if success: return matches else: return None def compress_whnp(tree, inside_whnp=False): if not tree.children: return tree for i, c in enumerate(tree.children): tree.children[i] = compress_whnp(c, inside_whnp=inside_whnp or tree.tag == 'WHNP') if tree.tag != 'WHNP': if inside_whnp: return corenlp.ConstituencyParse('NP', children=[tree]) return tree wh_word = None new_np_children = [] new_siblings = [] for i, c in enumerate(tree.children): if i == 0: if c.tag in ('WHNP', 'WHADJP', 'WHAVP', 'WHPP'): wh_word = c.children[0] new_np_children.extend(c.children[1:]) elif c.tag in ('WDT', 'WP', 'WP$', 'WRB'): wh_word = c else: return tree else: if c.tag == 'SQ': new_siblings = tree.children[i:] break new_np_children.append(corenlp.ConstituencyParse('NP', children=[c])) if new_np_children: new_np = corenlp.ConstituencyParse('NP', children=new_np_children) new_tree = corenlp.ConstituencyParse('WHNP', children=[wh_word, new_np]) else: new_tree = tree if new_siblings: new_tree = corenlp.ConstituencyParse('SBARQ', children=[new_tree] + new_siblings) return new_tree def 
convert_whp(node, q, a, tokens): if node.tag in ('WHNP', 'WHADJP', 'WHADVP', 'WHPP'): cur_phrase = node.get_phrase() cur_tokens = tokens[node.get_start_index():node.get_end_index()] for r in WHP_RULES: phrase, _ = r.convert(cur_phrase, a, cur_tokens, node, [], run_fix_style=False) if phrase: return phrase return None class ConversionRule(object): def convert(self, q, a, tokens, const_parse, entity_mentions, run_fix_style=True): raise NotImplementedError class ConstituencyRule(ConversionRule): def __init__(self, in_pattern, out_pattern, postproc=None): self.in_pattern = in_pattern self.out_pattern = unicode(out_pattern) self.name = in_pattern if postproc: self.postproc = postproc else: self.postproc = {} def convert(self, q, a, tokens, const_parse, entity_mentions, run_fix_style=True): pattern_toks = self.in_pattern.split(' ') match = match_pattern(self.in_pattern, const_parse) appended_clause = False if not match: appended_clause = True new_pattern = '$PP , ' + self.in_pattern pattern_toks = new_pattern.split(' ') match = match_pattern(new_pattern, const_parse) if not match: new_pattern = '$SBAR , ' + self.in_pattern pattern_toks = new_pattern.split(' ') match = match_pattern(new_pattern, const_parse) if not match: return None, None appended_clause_match = None fmt_args = [a] for t, m in zip(pattern_toks, match): if t.startswith('$') or '/' in t: phrase = convert_whp(m, q, a, tokens) if not phrase: phrase = m.get_phrase() fmt_args.append(phrase) if appended_clause: appended_clause_match = fmt_args[1] fmt_args = [a] + fmt_args[2:] for i in range(len(fmt_args)): if i in self.postproc: fmt_args[i] = run_postprocessing(fmt_args[i], self.postproc[i], fmt_args) output = self.gen_output(fmt_args) neg_output_list = self.gen_neg_output(fmt_args, a, entity_mentions) if appended_clause: output = appended_clause_match + ', ' + output if run_fix_style: output = fix_style(output) return output, neg_output_list
Apache License 2.0
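The core move in gen_neg_output above is: look up the answer's named-entity type, then regenerate the output with every other mention of the same type substituted in. A toy, dependency-free sketch of that substitution; the entity list and sentence are invented, and the real method additionally filters out pronouns, duplicates and format arguments.

def swap_same_type_entities(sentence, ans, entity_mentions):
    # entity_mentions: (surface form, NE type) pairs, e.g. from an NER pass
    ans_type = next((t for ent, t in entity_mentions if ent == ans), None)
    if ans_type is None:
        return []
    alternatives = [ent for ent, t in entity_mentions
                    if t == ans_type and ent != ans]
    return [sentence.replace(ans, alt) for alt in alternatives]

mentions = [("Paris", "LOCATION"), ("Berlin", "LOCATION"), ("Obama", "PERSON")]
print(swap_same_type_entities("The treaty was signed in Paris.", "Paris", mentions))
# ['The treaty was signed in Berlin.']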
balanced/status.balancedpayments.com
venv/lib/python2.7/site-packages/twilio/rest/resources/phone_numbers.py
PhoneNumber.transfer
python
def transfer(self, account_sid): a = self.parent.transfer(self.name, account_sid) self.load(a.__dict__)
Transfer the phone number with this sid from the current account to another account identified by account_sid.
https://github.com/balanced/status.balancedpayments.com/blob/e51a371079a8fa215732be3cfa57497a9d113d35/venv/lib/python2.7/site-packages/twilio/rest/resources/phone_numbers.py#L74-L80
import re from twilio import TwilioException from twilio.rest.resources.util import change_dict_key, transform_params from twilio.rest.resources import InstanceResource, ListResource class AvailablePhoneNumber(InstanceResource): def __init__(self, parent): super(AvailablePhoneNumber, self).__init__(parent, "") self.name = "" def purchase(self, **kwargs): return self.parent.purchase(phone_number=self.phone_number, **kwargs) class AvailablePhoneNumbers(ListResource): name = "AvailablePhoneNumbers" key = "available_phone_numbers" instance = AvailablePhoneNumber types = {"local": "Local", "tollfree": "TollFree"} def __init__(self, base_uri, auth, phone_numbers): super(AvailablePhoneNumbers, self).__init__(base_uri, auth) self.phone_numbers = phone_numbers def get(self, sid): raise TwilioException("Individual AvailablePhoneNumbers have no sid") def list(self, type="local", country="US", region=None, postal_code=None, lata=None, rate_center=None, **kwargs): kwargs["in_region"] = kwargs.get("in_region", region) kwargs["in_postal_code"] = kwargs.get("in_postal_code", postal_code) kwargs["in_lata"] = kwargs.get("in_lata", lata) kwargs["in_rate_center"] = kwargs.get("in_rate_center", rate_center) params = transform_params(kwargs) uri = "%s/%s/%s" % (self.uri, country, self.types[type]) resp, page = self.request("GET", uri, params=params) return [self.load_instance(i) for i in page[self.key]] def load_instance(self, data): instance = self.instance(self.phone_numbers) instance.load(data) instance.load_subresources() return instance class PhoneNumber(InstanceResource): def load(self, entries): if "account_sid" in entries: uri = re.sub(r'AC(.*)', entries["account_sid"], self.parent.base_uri) self.parent = PhoneNumbers(uri, self.parent.auth) self.base_uri = self.parent.uri super(PhoneNumber, self).load(entries)
MIT License
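A hedged usage sketch for PhoneNumber.transfer above, written against the legacy twilio-python client this module ships with. The SIDs and number are placeholders, and the entry point (TwilioRestClient with a phone_numbers list resource) is an assumption about that era of the library rather than something shown in the record.

from twilio.rest import TwilioRestClient   # legacy client, assumed to match this module

client = TwilioRestClient("ACxxxxxxxx", "auth_token")               # placeholder credentials
numbers = client.phone_numbers.list(phone_number="+15551234567")    # placeholder number
if numbers:
    # moves the number to the other account and reloads this instance from the response
    numbers[0].transfer("ACyyyyyyyy")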
bdtinc/maskcam
server/backend/app/db/cruds/crud_device.py
get_devices
python
def get_devices(db_session: Session) -> List[DeviceModel]: return db_session.query(DeviceModel).all()
Get all devices. Arguments: db_session {Session} -- Database session. Returns: List[DeviceModel] -- All device instances present in the database.
https://github.com/bdtinc/maskcam/blob/4841c2c49235844765e8c2164f5dd03a7d28bdad/server/backend/app/db/cruds/crud_device.py#L83-L93
from typing import List, Union, Dict from app.db.schema import DeviceModel from sqlalchemy.exc import IntegrityError from sqlalchemy.orm import Session from sqlalchemy.orm.exc import NoResultFound def create_device( db_session: Session, device_information: Dict = {} ) -> Union[DeviceModel, IntegrityError]: try: device_information["id"] = device_information["id"].replace(" ", "_") device = DeviceModel(**device_information) db_session.add(device) db_session.commit() db_session.refresh(device) return device except IntegrityError: db_session.rollback() raise def get_device( db_session: Session, device_id: str ) -> Union[DeviceModel, NoResultFound]: try: return get_device_by_id(db_session, device_id) except NoResultFound: raise
MIT License
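A hedged usage sketch for get_devices above: it assumes the backend package is importable and that a database with DeviceModel's table already exists; the connection URL is a placeholder.

from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker

from app.db.cruds.crud_device import get_devices   # same module as the record

engine = create_engine("postgresql://user:pass@localhost/maskcam")   # placeholder URL
Session = sessionmaker(bind=engine)

session = Session()
try:
    for device in get_devices(session):
        print(device.id)
finally:
    session.close()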
opennetworkingfoundation/tapi
RI/flask_server/tapi_server/models/tapi_topology_validation_mechanism.py
TapiTopologyValidationMechanism.validation_mechanism
python
def validation_mechanism(self): return self._validation_mechanism
Gets the validation_mechanism of this TapiTopologyValidationMechanism. Name of mechanism used to validate adjacency # noqa: E501 :return: The validation_mechanism of this TapiTopologyValidationMechanism. :rtype: str
https://github.com/opennetworkingfoundation/tapi/blob/1f3fd9483d5674552c5a31206c97399c8c151897/RI/flask_server/tapi_server/models/tapi_topology_validation_mechanism.py#L79-L87
from __future__ import absolute_import from datetime import date, datetime from typing import List, Dict from tapi_server.models.base_model_ import Model from tapi_server import util class TapiTopologyValidationMechanism(Model): def __init__(self, layer_protocol_adjacency_validated=None, validation_mechanism=None, validation_robustness=None): self.openapi_types = { 'layer_protocol_adjacency_validated': str, 'validation_mechanism': str, 'validation_robustness': str } self.attribute_map = { 'layer_protocol_adjacency_validated': 'layer-protocol-adjacency-validated', 'validation_mechanism': 'validation-mechanism', 'validation_robustness': 'validation-robustness' } self._layer_protocol_adjacency_validated = layer_protocol_adjacency_validated self._validation_mechanism = validation_mechanism self._validation_robustness = validation_robustness @classmethod def from_dict(cls, dikt) -> 'TapiTopologyValidationMechanism': return util.deserialize_model(dikt, cls) @property def layer_protocol_adjacency_validated(self): return self._layer_protocol_adjacency_validated @layer_protocol_adjacency_validated.setter def layer_protocol_adjacency_validated(self, layer_protocol_adjacency_validated): self._layer_protocol_adjacency_validated = layer_protocol_adjacency_validated @property
Apache License 2.0
alephdata/followthemoney
followthemoney/graph.py
Graph.add
python
def add(self, proxy: EntityProxy) -> None: if proxy is None: return self.queue(proxy.id, proxy) if proxy.schema.edge: for (source, target) in proxy.edgepairs(): self._add_edge(proxy, source, target) else: self._add_node(proxy)
Add an :class:`~followthemoney.proxy.EntityProxy` to the graph and make it either a :class:`~followthemoney.graph.Node` or an :class:`~followthemoney.graph.Edge`.
https://github.com/alephdata/followthemoney/blob/e7e1480aeac64c6284aaeb058a825587b8ff332e/followthemoney/graph.py#L245-L256
import logging from typing import Any, Dict, Generator, Iterable, List, Optional, Sequence from followthemoney.types import registry from followthemoney.types.common import PropertyType from followthemoney.schema import Schema from followthemoney.proxy import EntityProxy from followthemoney.property import Property from followthemoney.exc import InvalidData, InvalidModel log = logging.getLogger(__name__) class Node(object): __slots__ = ["type", "value", "id", "proxy", "schema"] def __init__( self, type_: PropertyType, value: str, proxy: Optional[EntityProxy] = None, schema: Optional[Schema] = None, ) -> None: self.type = type_ self.value = value _id = type_.node_id_safe(value) if _id is None: raise InvalidData("No ID for node") self.id = _id self.proxy = proxy self.schema = schema if proxy is None else proxy.schema @property def is_entity(self) -> bool: return self.type == registry.entity @property def caption(self) -> str: if self.type == registry.entity and self.proxy is not None: return self.proxy.caption caption = self.type.caption(self.value) return caption or self.value def to_dict(self) -> Dict[str, Any]: return { "id": self.id, "type": self.type.name, "value": self.value, "caption": self.caption, } @classmethod def from_proxy(cls, proxy: EntityProxy) -> "Node": return cls(registry.entity, proxy.id, proxy=proxy) def __str__(self) -> str: return self.caption def __repr__(self) -> str: return "<Node(%r, %r, %r)>" % (self.id, self.type, self.caption) def __hash__(self) -> int: return hash(self.id) def __eq__(self, other: Any) -> bool: return bool(self.id == other.id) class Edge(object): __slots__ = [ "id", "weight", "source_id", "target_id", "prop", "proxy", "schema", "graph", ] def __init__( self, graph: "Graph", source: Node, target: Node, proxy: Optional[EntityProxy] = None, prop: Optional[Property] = None, value: Optional[str] = None, ): self.graph = graph self.id = f"{source.id}<>{target.id}" self.source_id = source.id self.target_id = target.id self.weight = 1.0 self.prop = prop self.proxy = proxy self.schema: Optional[Schema] = None if prop is not None and value is not None: self.weight = prop.specificity(value) if proxy is not None: self.id = f"{source.id}<{proxy.id}>{target.id}" self.schema = proxy.schema @property def source(self) -> Optional[Node]: return self.graph.nodes.get(self.source_id) @property def source_prop(self) -> Property: if self.schema is not None and self.schema.source_prop is not None: if self.schema.source_prop.reverse is not None: return self.schema.source_prop.reverse if self.prop is None: raise InvalidModel("Contradiction: %r" % self) return self.prop @property def target(self) -> Optional[Node]: return self.graph.nodes.get(self.target_id) @property def target_prop(self) -> Optional[Property]: if self.schema is not None and self.schema.target_prop is not None: return self.schema.target_prop.reverse if self.prop is not None: return self.prop.reverse return None @property def type_name(self) -> str: if self.schema is not None: return self.schema.name if self.prop is None: raise InvalidModel("Invalid edge: %r" % self) return self.prop.name def to_dict(self) -> Dict[str, str]: return { "id": self.id, "source_id": self.source_id, "target_id": self.target_id, "type_name": self.type_name, } def __repr__(self) -> str: return "<Edge(%r)>" % self.id def __hash__(self) -> int: return hash(self.id) def __eq__(self, other: Any) -> bool: return bool(self.id == other.id) class Graph(object): def __init__(self, edge_types: Iterable[PropertyType] = registry.pivots) -> 
None: types = registry.get_types(edge_types) self.edge_types = [t for t in types if t.matchable] self.flush() def flush(self) -> None: self.edges: Dict[str, Edge] = {} self.nodes: Dict[str, Node] = {} self.proxies: Dict[str, Optional[EntityProxy]] = {} def queue(self, id_: str, proxy: Optional[EntityProxy] = None) -> None: if id_ not in self.proxies or proxy is not None: self.proxies[id_] = proxy @property def queued(self) -> List[str]: return [i for (i, p) in self.proxies.items() if p is None] def _get_node_stub(self, prop: Property, value: str) -> Node: if prop.type == registry.entity: self.queue(value) node = Node(prop.type, value, schema=prop.range) if node.id not in self.nodes: self.nodes[node.id] = node return self.nodes[node.id] def _add_edge(self, proxy: EntityProxy, source: str, target: str) -> None: if proxy.schema.source_prop is None: raise InvalidModel("Invalid edge entity: %r" % proxy) source_node = self._get_node_stub(proxy.schema.source_prop, source) if proxy.schema.target_prop is None: raise InvalidModel("Invalid edge entity: %r" % proxy) target_node = self._get_node_stub(proxy.schema.target_prop, target) edge = Edge(self, source_node, target_node, proxy=proxy) self.edges[edge.id] = edge def _add_node(self, proxy: EntityProxy) -> None: entity = Node.from_proxy(proxy) self.nodes[entity.id] = entity for prop, value in proxy.itervalues(): if prop.type not in self.edge_types: continue node = self._get_node_stub(prop, value) edge = Edge(self, entity, node, prop=prop, value=value) if edge.weight > 0: self.edges[edge.id] = edge
MIT License
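A hedged usage sketch for Graph.add above. It builds one entity proxy with followthemoney's bundled model and adds it to a graph, so it becomes an entity node plus a node and an edge for each pivoting property value; the schema, id and values are illustrative.

from followthemoney import model
from followthemoney.graph import Graph

person = model.make_entity("Person")
person.id = "person-1"                    # illustrative id
person.add("name", "Jane Doe")
person.add("email", "jane@example.com")   # email is a pivoting type, so it yields a node and edge

graph = Graph()
graph.add(person)
print(len(graph.nodes), len(graph.edges))   # one entity node plus a node/edge per pivoting value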
johnpdowling/custom_components
forked-daapd/media_player.py
ForkedDaapd.play
python
def play(self): return self._command('play')
Set playback to play and return the current state.
https://github.com/johnpdowling/custom_components/blob/6019dc02d62ae0f0a16ffeda09438d54b94127e9/forked-daapd/media_player.py#L120-L122
import logging import requests import voluptuous as vol import os from homeassistant.components.media_player import ( MediaPlayerDevice, PLATFORM_SCHEMA) from homeassistant.components.media_player.const import ( MEDIA_TYPE_MUSIC, MEDIA_TYPE_PLAYLIST, SUPPORT_NEXT_TRACK, SUPPORT_PAUSE, SUPPORT_PLAY, SUPPORT_PLAY_MEDIA, SUPPORT_PREVIOUS_TRACK, SUPPORT_SEEK, SUPPORT_TURN_OFF, SUPPORT_TURN_ON, SUPPORT_VOLUME_MUTE, SUPPORT_VOLUME_SET) from homeassistant.const import ( CONF_HOST, CONF_NAME, CONF_PORT, CONF_SSL, STATE_IDLE, STATE_OFF, STATE_ON, STATE_PAUSED, STATE_PLAYING) import homeassistant.helpers.config_validation as cv _LOGGER = logging.getLogger(__name__) DEFAULT_NAME = 'forked-daapd' DEFAULT_PORT = 3689 DEFAULT_SSL = False DEFAULT_TIMEOUT = 10 DOMAIN = 'forked-daapd' SUPPORT_FORKEDDAAPD = SUPPORT_VOLUME_SET | SUPPORT_VOLUME_MUTE | SUPPORT_PLAY_MEDIA | SUPPORT_TURN_OFF | SUPPORT_TURN_ON SUPPORT_AIRPLAY = SUPPORT_VOLUME_SET | SUPPORT_VOLUME_MUTE | SUPPORT_TURN_ON | SUPPORT_TURN_OFF PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({ vol.Required(CONF_HOST): cv.string, vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string, vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port, vol.Optional(CONF_SSL, default=DEFAULT_SSL): cv.boolean, }) class ForkedDaapd: def __init__(self, host, port, use_ssl): self.host = host self.port = port self.use_ssl = use_ssl os.system("apk add --no-cache mpg123") @property def _base_url(self): if self.use_ssl: uri_scheme = 'https://' else: uri_scheme = 'http://' if self.port: return '{}{}:{}'.format(uri_scheme, self.host, self.port) return '{}{}'.format(uri_scheme, self.host) def _request(self, method, path, params=None): url = '{}{}'.format(self._base_url, path) try: if method == 'GET': response = requests.get(url, timeout=DEFAULT_TIMEOUT) elif method == 'PUT_EMPTY': response = requests.put(url, timeout=DEFAULT_TIMEOUT) elif method == 'PUT': response = requests.put(url, json=dict(params) if params else None, timeout=DEFAULT_TIMEOUT) elif method == 'DELETE': response = requests.delete(url, timeout=DEFAULT_TIMEOUT) try: return response.json() except: return {} except requests.exceptions.HTTPError: return {'player_state': 'error'} except requests.exceptions.RequestException: return {'player_state': 'offline'} def _command(self, named_command): return self._request('PUT', '/api/player/' + named_command) def player(self): return self._request('GET', '/api/player') def queue(self, item_id = None): if item_id is None: return self._request('GET', '/api/queue') else: return self._request('GET', "/api/queue?id=" + str(item_id)) def set_volume(self, level): return self._request('PUT', '/api/player/volume?volume=' + str(int(level * 100))) def set_muted(self, muted): if muted is True: self.set_volume(0) else: self.set_volume(0.5)
Apache License 2.0
pedrolamas/home-assistant-config
config/custom_components/hacs/tasks/manager.py
HacsTaskManager.__init__
python
def __init__(self, hacs: HacsBase, hass: HomeAssistant) -> None: self.hacs = hacs self.hass = hass self.__tasks: dict[str, HacsTask] = {}
Initialize the setup manager class.
https://github.com/pedrolamas/home-assistant-config/blob/66f1fb80e12468a6fe97ae5dfef64d2691754a41/config/custom_components/hacs/tasks/manager.py#L18-L22
from __future__ import annotations import asyncio from importlib import import_module from pathlib import Path from homeassistant.core import HomeAssistant from ..base import HacsBase from ..mixin import LogMixin from .base import HacsTask class HacsTaskManager(LogMixin):
MIT License
robertwayne/dpymenus
dpymenus/poll.py
Poll._finish_poll
python
async def _finish_poll(self): cheaters = await self._get_cheaters() for voters in self.data.values(): voters -= cheaters await self.output.clear_reactions() await self.page.on_next_event(self)
Removes multi-votes and calls the Page on_next function when finished.
https://github.com/robertwayne/dpymenus/blob/35cf9e3e9e6306cc6e6a5266688a56ae1edfa49e/dpymenus/poll.py#L125-L132
import asyncio import logging from typing import Any, Dict, List, Set from warnings import warn from discord import RawReactionActionEvent, User from discord.ext.commands import Context from dpymenus import ButtonMenu, ButtonsError, EventError, PagesError, SessionError from dpymenus.hooks import call_hook class Poll(ButtonMenu): def __init__(self, ctx: Context): super().__init__(ctx) self.voted: Set[User] = set() def __repr__(self): return f'Poll(pages={[p.__str__() for p in self.pages]}, page={self.page.index}, timeout={self.timeout}, data={self.data})' async def results(self) -> Dict[str, int]: return {choice: len(voters) for choice, voters in self.data.items()} async def add_results_fields(self): for choice, voters in self.data.items(): next_page = self.pages[self.page.index + 1] next_page.add_field(name=choice, value=str(len(voters))) async def generate_results_page(self): next_page = self.pages[self.page.index + 1] await self.add_results_fields() highest_value = max(self.data.values()) winning_key = {choice for choice, voters in self.data.items() if voters == highest_value} if len(highest_value) == 0: next_page.description = ' '.join([next_page.description, f'It\'s a draw!']) else: next_page.description = ' '.join([next_page.description, f'{str(next(iter(winning_key)))} wins!']) async def open(self): try: self._validate_callbacks() await super()._open() except SessionError as exc: logging.info(exc.message) else: await self._set_data() await self._add_buttons() await call_hook(self, '_hook_after_open') pending = set() while self.active: try: _, pending = await asyncio.wait( [ asyncio.create_task(self._get_vote_add()), asyncio.create_task(self._get_vote_remove()), asyncio.create_task(self._poll_timer()), ], return_when=asyncio.FIRST_COMPLETED, ) finally: for task in pending: task.cancel() await self._finish_poll() async def _get_vote_add(self): while True: try: reaction_event = await self.ctx.bot.wait_for( 'raw_reaction_add', timeout=self.timeout, check=self._check_reaction ) except asyncio.TimeoutError: return else: if reaction_event.emoji.name in self.page.buttons_list: self.data[reaction_event.emoji.name].add(reaction_event.user_id) async def _get_vote_remove(self): while True: try: reaction_event = await self.ctx.bot.wait_for( 'raw_reaction_remove', timeout=self.timeout, check=self._check_reaction, ) except asyncio.TimeoutError: return else: if reaction_event.emoji.name in self.page.buttons_list: self.data[reaction_event.emoji.name].remove(reaction_event.user_id) def _check_reaction(self, event: RawReactionActionEvent) -> bool: return event.member is not None and event.member.bot is False async def _poll_timer(self): await asyncio.sleep(self.timeout)
MIT License
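The multi-vote cleanup in _finish_poll above is set arithmetic: offending user ids are removed from every choice before results are read. The _get_cheaters helper is not shown in the record, so treating a cheater as any id appearing under more than one choice is an assumption based on the docstring; the vote data below is invented.

from itertools import combinations

data = {"yes": {1, 2, 3}, "no": {2, 4}, "maybe": {3}}

# assume a "cheater" is any user id that shows up under more than one choice
cheaters = set()
for a, b in combinations(data.values(), 2):
    cheaters |= a & b

for voters in data.values():
    voters -= cheaters   # in-place set difference, as in the Poll class

print({choice: len(v) for choice, v in data.items()})   # {'yes': 1, 'no': 1, 'maybe': 0}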
ulule/django-linguist
linguist/utils.py
get_language_fields
python
def get_language_fields(fields): return [ "%s_%s" % (field, lang) for field in fields for lang in get_supported_languages() ]
Takes a list of fields and returns related language fields.
https://github.com/ulule/django-linguist/blob/dad1ccacb02ab9aa3b05da3bcfbade6e1da70ddb/linguist/utils.py#L77-L85
import copy import itertools import collections from importlib import import_module from django.db.models import QuerySet from django.core import exceptions from django.utils.encoding import force_text from django.utils.functional import lazy from django.utils.translation import get_language as _get_language from . import settings collections_abc = getattr(collections, 'abc', collections) CLASS_PATH_ERROR = ( "django-linguist is unable to interpret settings value for %s. " "%s should be in the form of a tupple: " "('path.to.models.Class', 'app_label')." ) def get_language_name(code): languages = dict( (lang_code, lang_name) for lang_code, lang_name in settings.SUPPORTED_LANGUAGES ) return languages.get(code) def get_language(): lang = _get_language() if not lang: return get_fallback_language() langs = [l[0] for l in settings.SUPPORTED_LANGUAGES] if lang not in langs and "-" in lang: lang = lang.split("-")[0] if lang in langs: return lang return settings.DEFAULT_LANGUAGE def get_fallback_language(): return settings.DEFAULT_LANGUAGE def get_real_field_name(field, lang=None): if lang is None: lang = get_language() return str("%s_%s" % (field, lang.replace("-", "_"))) def get_fallback_field_name(field): return get_real_field_name(field, lang=get_fallback_language()) def get_supported_languages(): return [code.replace("-", "_") for code, name in settings.SUPPORTED_LANGUAGES]
MIT License
microsoft/restler-fuzzer
restler/engine/fuzzing_parameters/request_params.py
ParamObject.check_type_mismatch
python
def check_type_mismatch(self, check_value): if not isinstance(check_value, dict): return self.tag for member in self._members: tag = member.check_type_mismatch(check_value) if tag: return tag return None
Checks to see if the check_value param is a dict object and then checks each object member for its correct type. If any of the param types are a mismatch, returns that param's tag. @param check_value: The body string that is used to compare with this param @type check_value: Str @return: The tag of the mismatched param if a type mismatch was detected or None @rtype : Str or None
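A hedged, self-contained sketch of the same idea using a plain dict of expected leaf types instead of RESTler's param classes; every name below is illustrative and not part of the RESTler API:

def first_type_mismatch(expected_types, check_value):
    # expected_types: {member_name: python_type}; return the first mismatched member name, else None
    if not isinstance(check_value, dict):
        return '<body>'
    for name, expected in expected_types.items():
        if name in check_value and not isinstance(check_value[name], expected):
            return name
    return None

print(first_type_mismatch({'count': int, 'name': str}, {'count': 'ten', 'name': 'x'}))  # count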
https://github.com/microsoft/restler-fuzzer/blob/d74a267467a2d43fb37c8a16754d0b28e80b649a/restler/engine/fuzzing_parameters/request_params.py#L411-L430
import sys import json from abc import ABCMeta, abstractmethod import engine.primitives as primitives import engine.dependencies as dependencies from engine.fuzzing_parameters.fuzzing_config import * TAG_SEPARATOR = '/' FUZZABLE_GROUP_TAG = "fuzzable_group_tag" class KeyValueParamBase(): __metaclass__ = ABCMeta """ Abstract base class for parameters that are key-value pairs, such as query and header parameters. """ def __init__(self, key, content): self._key = key self._content = content @property def key(self): return self._key @property def content(self): return self._content @property def is_required(self): return self.content.is_required @property def is_dynamic_object(self): return self.content.is_dynamic_object def type(self): return self._type def __eq__(self, other): if not isinstance(other, QueryParam): return False return self._content == other.content def __hash__(self): return hash(self._content) class QueryParam(KeyValueParamBase): def __init__(self, key, content): KeyValueParamBase.__init__(self, key, content) def get_blocks(self): key = primitives.restler_static_string(f'{self._key}=') return [key] + self._content.get_blocks(FuzzingConfig()) class HeaderParam(KeyValueParamBase): def __init__(self, key, content): KeyValueParamBase.__init__(self, key, content) def get_blocks(self): key = primitives.restler_static_string(f'{self._key}: ') return [key] + self._content.get_blocks(FuzzingConfig()) class ParamBase(): def __init__(self, is_required=True, is_dynamic_object=False): self._fuzzable = False self._tag = '' self._is_required = is_required self._is_dynamic_object = is_dynamic_object @property def tag(self): return self._tag @tag.setter def tag(self, tag): self._tag = tag @property def is_required(self): return self._is_required @property def is_dynamic_object(self): return self._is_dynamic_object def meta_copy(self, src): self.set_fuzzable(src.is_fuzzable()) self._tag = src._tag self._is_required = src._is_required def set_fuzzable(self, is_fuzzable): self._fuzzable = is_fuzzable def is_fuzzable(self): return self._fuzzable class ParamValue(ParamBase): def __init__(self, custom_payload_type=None, is_required=True, is_dynamic_object=False): ParamBase.__init__(self, is_required, is_dynamic_object) self._content = None self._custom_payload_type = custom_payload_type def __eq__(self, other): if not isinstance(other, ParamValue): return False return self._content == other.content def __hash__(self): return hash((self._content, self._tag)) @property def content(self): return self._content @content.setter def content(self, content): self._content = content def count_nodes(self, config): return 0 def get_signature(self, config): return f'{TAG_SEPARATOR}{self.tag}_str' def get_schema_tag_mapping(self, tags: dict, config): tags.update({self.tag : self._content}) def get_blocks(self, config=None): if self._custom_payload_type is not None: if self._custom_payload_type == "String": return [primitives.restler_custom_payload(self._content)] elif self._custom_payload_type == "Query": return [primitives.restler_custom_payload(self._content)] elif self._custom_payload_type == "Header": return [primitives.restler_custom_payload_header(self._content)] else: raise Exception(f"Unknown custom payload type: {self._custom_payload_type}") content = self._content if self.is_dynamic_object: content = dependencies.RDELIM + self._content + dependencies.RDELIM return [primitives.restler_static_string(content)] def get_fuzzing_blocks(self, visitor): return [] def check_type_mismatch(self, check_value): if 
not isinstance(check_value, self.type): return self.tag return None def check_struct_missing(self, check_value, visitor): pass class ParamObject(ParamBase): def __init__(self, members, is_required=True, is_dynamic_object=False): ParamBase.__init__(self, is_required, is_dynamic_object) self._members = members def __eq__(self, other): if not isinstance(other, ParamObject): return False return self._members == other.members def __hash__(self): _hash = 0 for member in self._members: _hash += hash(member) return _hash @property def members(self): if self._members: return self._members return [] def get_schema_tag_mapping(self, tags: dict, config): if config.depth < config.max_depth: config.depth += 1 for member in self._members: member.get_schema_tag_mapping(tags, config) config.depth -= 1 def get_signature(self, config): return self._traverse(config, sys._getframe().f_code.co_name, f'{TAG_SEPARATOR}obj') def count_nodes(self, config): return self._traverse(config, sys._getframe().f_code.co_name, 1) def get_blocks(self, config): members_blocks = self._traverse(config, sys._getframe().f_code.co_name, []) return self._get_blocks(members_blocks) def get_fuzzing_pool(self, fuzzer, config): fuzzed_members = [] if config.depth < config.max_depth: config.depth += 1 for member in self._members: fuzzed_members.append(member.get_fuzzing_pool(fuzzer, config)) config.depth -= 1 return fuzzer._fuzz_object(self, fuzzed_members) def get_fuzzing_blocks(self, config): members_blocks = self._traverse(config, sys._getframe().f_code.co_name, []) return self._get_blocks(members_blocks)
MIT License
finance-hub/financehub
webscrapers/CETIP/getcetipdata.py
CETIP._get_dates
python
def _get_dates(initial_date, end_date): oldest_date = "2012-08-20" if initial_date is None or (strptime(initial_date, '%Y-%m-%d') < strptime(oldest_date, '%Y-%m-%d')): initial_date = oldest_date if end_date is None: end_date = (datetime.today() - timedelta(1)).strftime('%Y-%m-%d') df = pd.date_range(initial_date, end_date, freq='D') return df
:param initial_date: initial date for the time interval. If None, uses the first available date on CETIP :param end_date: end date for the time interval. If None, uses the previous day. :return: pandas DatetimeIndex with the time interval specified.
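A small illustration of the pandas call the helper relies on; the bounds below are arbitrary example dates:

import pandas as pd

# Daily dates between two bounds, as _get_dates builds internally with pd.date_range
dates = pd.date_range("2020-01-01", "2020-01-05", freq="D")
print([d.strftime("%Y-%m-%d") for d in dates])
# ['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05']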
https://github.com/finance-hub/financehub/blob/3968d9965e8e2c3b5850f1852b56c485859a9c89/webscrapers/CETIP/getcetipdata.py#L75-L92
import pandas as pd from datetime import datetime, timedelta from time import strptime class CETIP(object): def fetch(self, series_id, initial_date=None, end_date=None): if type(series_id) is list: df = pd.DataFrame() for cod in series_id: series = self._fetch_single_series(cod, initial_date, end_date) df = pd.concat([df, series], axis=1) df.sort_index(inplace=True) else: df = self._fetch_single_series(series_id, initial_date, end_date) return df def _fetch_single_series(self, series_id, initial_date, end_date): dates = self._get_dates(initial_date, end_date) df = pd.DataFrame() for date in dates: url = f'ftp://ftp.cetip.com.br/{series_id}/{date.strftime("%Y%m%d")}.txt' try: data = pd.read_csv(url, header=None).iloc[0, 0] if series_id == 'MediaCDI': data = data/100 df.loc[date, series_id] = data except: continue return df @staticmethod
MIT License
bogdanvuk/pygears
pygears/hls/ast/visitor.py
visit_ast
python
def visit_ast(node, ctx): if node is None: return ir.ResExpr(None) raise SyntaxError(f"Unsupported language construct", node.lineno)
Used by default. Called if no explicit function exists for a node.
https://github.com/bogdanvuk/pygears/blob/a0b21d445e1d5c89ad66751447b8253536b835ee/pygears/hls/ast/visitor.py#L347-L353
import inspect import typing from pygears.core.infer_ftypes import infer_ftypes from pygears.core.gear import OutSig, InSig from pygears.typing import Any, typeof from functools import singledispatch from dataclasses import dataclass from .. import ir from pygears import reg, Intf from .utils import add_to_list, get_function_source, get_function_ast from pygears.core.util import get_function_context_dict from pygears.conf.trace import gear_definition_location from pygears.conf.trace import make_traceback, TraceException import sys class HLSSyntaxError(TraceException): pass def form_hls_syntax_error(ctx, e, lineno=1): msg = str(e) for c in reversed(reg['hls/ctx']): if isinstance(c, GearContext): func, fn, ln, _ = gear_definition_location(c.gear.func) msg += (f'\n - when compiling gear "{c.gear.name}" with' f' parameters {c.gear.params}') else: func, fn, ln, _ = gear_definition_location(c.funcref.func) msg += (f'\n - when compiling function "{c.funcref.func}" with' f' signature {c.signature}') err = HLSSyntaxError(msg, ln + lineno - 1, filename=fn) traceback = make_traceback((HLSSyntaxError, err, sys.exc_info()[2])) _, exc_value, tb = traceback.standard_exc_info return exc_value, tb @dataclass class Submodule: gear: typing.Any in_ports: typing.List out_ports: typing.List def hashable(v): try: hash(v) except TypeError: return False return True class Function: def __init__(self, func, args, kwds, uniqueid=None): self.source = get_function_source(func) self.func = func self.ast = get_function_ast(func) self.basename = ''.join(e for e in func.__name__ if e.isalnum() or e == '_') self.uniqueid = uniqueid hargs = tuple(arg.val if isinstance(arg, ir.ResExpr) else arg.dtype for arg in args) hkwds = tuple((k, v) for k, v in kwds.items() if hashable(v)) if self.source: self._hash = hash(self.source) ^ hash(hargs) ^ hash(hkwds) else: self._hash = hash(self.__qualname__) ^ hash(hargs) ^ hash(hkwds) @property def name(self): if self.uniqueid is None: return self.basename return f'{self.basename}_{self.uniqueid}' def __eq__(self, other): return hash(self) == hash(other) def __hash__(self): return self._hash class Context: def __init__(self): self.scope: typing.Dict = {} self.methods = {} self.local_namespace: typing.Dict = {'__builtins__': __builtins__} self.ir_block_closure: typing.List = [] self.submodules: typing.List[Submodule] = [] self.reaching: typing.Dict = {} self.ast_stmt_map: typing.Dict = {} def ref(self, name, ctx='load'): if name in self.scope: return ir.Name(name, self.scope[name], ctx=ctx) if name in self.local_namespace: return ir.ResExpr(self.local_namespace[name]) raise KeyError def find_unique_name(self, name): res_name = name i = 0 while res_name in self.scope: i += 1 res_name = f'{name}_{i}' return res_name @property def ir_parent_block(self): return self.ir_block_closure[-1] @property def variables(self): return { name: obj for name, obj in self.scope.items() if isinstance(obj, ir.Variable) and obj.val is None } class IntfProxy(Intf): def __init__(self, port): self.port = port @property def dtype(self): return self.port.dtype def __str__(self): return self.port.basename def __repr__(self): return repr(self.port) class GearContext(Context): def __init__(self, gear): super().__init__() self.gear = gear self.functions: typing.Mapping[Function, FuncContext] = {} self.local_namespace = get_function_context_dict(self.gear.func).copy() paramspec = inspect.getfullargspec(self.gear.func) vararg = [] for p in self.gear.in_ports: self.scope[p.basename] = ir.Variable(p.basename, 
ir.IntfType[p.dtype, ir.IntfType.iin]) if paramspec.varargs and p.basename.startswith(paramspec.varargs): vararg.append(self.ref(p.basename)) if paramspec.varargs: self.local_namespace[paramspec.varargs] = ir.ConcatExpr(vararg) for p in self.gear.out_ports: self.scope[p.basename] = ir.Variable(p.basename, ir.IntfType[p.consumer.dtype, ir.IntfType.iout]) for k, v in self.gear.explicit_params.items(): self.scope[k] = ir.ResExpr(v) @property def intfs(self): return { name: obj for name, obj in self.scope.items() if isinstance(obj, ir.Variable) and typeof(obj.dtype, ir.IntfType) } @property def signals(self): return { name: obj for name, obj in self.scope.items() if isinstance(obj, ir.Variable) and isinstance(obj.val, (OutSig, InSig)) } @property def regs(self): return { name: obj for name, obj in self.scope.items() if isinstance(obj, ir.Variable) and obj.reg } @property def in_ports(self): return [ obj for obj in self.scope.values() if (isinstance(obj, ir.Interface) and isinstance(obj.intf, Intf) and obj.intf.producer and obj.intf.producer.gear is self.gear) ] @property def out_ports(self): return [self.ref(p.basename) for p in self.gear.out_ports] class FuncContext(Context): def argdict(self, args, kwds): paramspec = inspect.getfullargspec(self.funcref.func) args = dict(zip(paramspec.args, args)) args.update(kwds) for name in self.const_args: if name not in args: continue del args[name] return args def __init__(self, funcref: Function, args, kwds): super().__init__() self.funcref = funcref func = funcref.func self.local_namespace = get_function_context_dict(func).copy() paramspec = inspect.getfullargspec(funcref.func) args = dict(zip(paramspec.args, args)) args.update(kwds) self.ret_dtype = None self.const_args = {} kwddefaults = paramspec.kwonlydefaults or {} if paramspec.defaults: for name, val in zip(reversed(paramspec.args), reversed(paramspec.defaults)): if name in args: continue args[name] = ir.ResExpr(val) for k, v in kwddefaults.items(): if k not in args: args[k] = ir.ResExpr(v) if func.__annotations__: params = {**func.__annotations__, **kwddefaults} for a in paramspec.args: if a not in params: params[a] = Any arg_types = {} for name in paramspec.args: if name not in args: continue arg_types[name] = args[name].dtype for name, var in args.items(): if name in params: continue if isinstance(var, ir.ResExpr): params[name] = var.val else: params[name] = var res = infer_ftypes(params=params, args=arg_types, namespace=self.local_namespace) for name, dtype in res.items(): if name not in args: continue if isinstance(args[name], ir.ResExpr): self.local_namespace[name] = args[name].val self.const_args[name] = args[name] else: self.scope[name] = ir.Variable(name, dtype) else: for name, arg in args.items(): if isinstance(arg, ir.ResExpr): self.local_namespace[name] = arg.val self.const_args[name] = arg else: self.scope[name] = ir.Variable(name, arg.dtype) self.signature = { name: self.scope.get(name, var).dtype for name, var in args.items() if name not in self.const_args } def reraise(tp, value, tb=None): if value.__traceback__ is not tb: raise value.with_traceback(tb) raise value def node_visitor(ast_type): def wrapper(f): def func_wrapper(node, ctx): if reg['trace/level'] == 0: return f(node, ctx) err = None try: return f(node, ctx) except Exception as e: if isinstance(e, HLSSyntaxError) and e.lineno is not None: _, exc_value, tb = sys.exc_info() elif hasattr(node, 'lineno'): exc_value, tb = form_hls_syntax_error(ctx, e, node.lineno) else: err = e tb = None if err is not None: raise err else: 
raise exc_value.with_traceback(tb) if isinstance(ast_type, tuple): f_ret = visit_ast.register(ast_type[0])(func_wrapper) for t in ast_type[1:]: f_ret = visit_ast.register(t)(f_ret) else: f_ret = visit_ast.register(ast_type)(func_wrapper) return f_ret return wrapper @singledispatch
MIT License
hallee/espresso-arm
remi/remi/gui.py
Widget.set_on_touchleave_listener
python
def set_on_touchleave_listener(self, listener, funcname): self.attributes[self.EVENT_ONTOUCHLEAVE] = "sendCallback('%s','%s');" "event.stopPropagation();event.preventDefault();" "return false;" % (id(self), self.EVENT_ONTOUCHLEAVE) self.eventManager.register_listener(self.EVENT_ONTOUCHLEAVE, listener, funcname)
Registers the listener for the Widget.ontouchleave event. Note: the listener prototype has to be in the form on_widget_touchleave(self). Args: listener (App, Widget): Instance of the listener. It can be the App or a Widget. funcname (str): Literal name of the listener function, member of the listener instance
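A hedged usage sketch following the usual remi App pattern; the import paths, widget, and handler name are assumptions for illustration, and the funcname string must match the handler's literal name:

from remi import App
from remi.gui import Widget

class MyApp(App):
    def main(self):
        self.panel = Widget(width=200, height=200)
        # 'on_panel_touchleave' is the literal name of the handler method below
        self.panel.set_on_touchleave_listener(self, 'on_panel_touchleave')
        return self.panel

    def on_panel_touchleave(self):
        print('touch left the panel')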
https://github.com/hallee/espresso-arm/blob/d535cc7d8fa41043c6f27fcefa52f98168df4cd4/remi/remi/gui.py#L710-L722
import os import logging from functools import cmp_to_key import collections from .server import runtimeInstances, update_event log = logging.getLogger('remi.gui') def decorate_set_on_listener(event_name, params): def add_annotation(function): function._event_listener = {} function._event_listener['eventName'] = event_name function._event_listener['prototype'] = params return function return add_annotation def decorate_constructor_parameter_types(type_list): def add_annotation(function): function._constructor_types = type_list return function return add_annotation def to_pix(x): return str(x) + 'px' def from_pix(x): v = 0 try: v = int(float(x.replace('px', ''))) except ValueError: log.error('error parsing px', exc_info=True) return v def jsonize(d): return ';'.join(map(lambda k, v: k + ':' + v + '', d.keys(), d.values())) class _VersionedDictionary(dict): def __init__(self, *args, **kwargs): self.__version__ = 0 super(_VersionedDictionary, self).__init__(*args, **kwargs) def __setitem__(self, key, value, version_increment=1): if key in self: if self[key] == value: return self.__version__ += version_increment return super(_VersionedDictionary, self).__setitem__(key, value) def __delitem__(self, key, version_increment=1): if not key in self: return self.__version__ += version_increment return super(_VersionedDictionary, self).__delitem__(key) def pop(self, key, d=None, version_increment=1): if not key in self: return self.__version__ += version_increment return super(_VersionedDictionary, self).pop(key, d) def clear(self, version_increment=1): self.__version__ += version_increment return super(_VersionedDictionary, self).clear() class _EventManager(object): def __init__(self): self.listeners = {} def propagate(self, eventname, params): if eventname not in self.listeners: return listener = self.listeners[eventname] return getattr(listener['instance'], listener['funcname'])(*params) def register_listener(self, eventname, instance, funcname): self.listeners[eventname] = {'instance': instance, 'funcname': funcname} class Tag(object): def __init__(self, **kwargs): self.kwargs = kwargs runtimeInstances[str(id(self))] = self self._render_children_list = [] self.children = _VersionedDictionary() self.attributes = _VersionedDictionary() self.style = _VersionedDictionary() self.type = kwargs.get('_type', '') self.attributes['id'] = str(id(self)) cls = kwargs.get('_class', self.__class__.__name__) if cls: self._classes = [cls] else: self._classes = [] @property def identifier(self): return self.attributes['id'] def repr(self, client, include_children=True): self.attributes['children_list'] = ','.join(map(lambda k, v: str( id(v)), self.children.keys(), self.children.values())) innerHTML = '' for s in self._render_children_list: if isinstance(s, type('')): innerHTML = innerHTML + s elif isinstance(s, type(u'')): innerHTML = innerHTML + s.encode('utf-8') elif include_children: innerHTML = innerHTML + s.repr(client) html = '<%s %s %s>%s</%s>' % (self.type, ' '.join('%s="%s"' % (k, v) if v is not None else k for k, v in self.attributes.items()), ('class="%s"' % ' '.join(self._classes)) if self._classes else '', innerHTML, self.type) return html def add_class(self, cls): self._classes.append(cls) def remove_class(self, cls): try: self._classes.remove(cls) except ValueError: pass def add_child(self, key, child): if hasattr(child, 'attributes'): child.attributes['parent_widget'] = str(id(self)) if key in self.children: self._render_children_list.remove(self.children[key]) 
self._render_children_list.append(child) self.children[key] = child def get_child(self, key): return self.children[key] def empty(self): for k in list(self.children.keys()): self.remove_child(self.children[k]) def remove_child(self, child): if child in self.children.values(): self._render_children_list.remove(child) for k in self.children.keys(): if str(id(self.children[k])) == str(id(child)): if k in self._render_children_list: self._render_children_list.remove(self.children[k]) self.children.pop(k) break class Widget(Tag): LAYOUT_HORIZONTAL = True LAYOUT_VERTICAL = False EVENT_ONCLICK = 'onclick' EVENT_ONDBLCLICK = 'ondblclick' EVENT_ONMOUSEDOWN = 'onmousedown' EVENT_ONMOUSEMOVE = 'onmousemove' EVENT_ONMOUSEOVER = 'onmouseover' EVENT_ONMOUSEOUT = 'onmouseout' EVENT_ONMOUSELEAVE = 'onmouseleave' EVENT_ONMOUSEUP = 'onmouseup' EVENT_ONTOUCHMOVE = 'ontouchmove' EVENT_ONTOUCHSTART = 'ontouchstart' EVENT_ONTOUCHEND = 'ontouchend' EVENT_ONTOUCHENTER = 'ontouchenter' EVENT_ONTOUCHLEAVE = 'ontouchleave' EVENT_ONTOUCHCANCEL = 'ontouchcancel' EVENT_ONKEYDOWN = 'onkeydown' EVENT_ONKEYPRESS = 'onkeypress' EVENT_ONKEYUP = 'onkeyup' EVENT_ONCHANGE = 'onchange' EVENT_ONFOCUS = 'onfocus' EVENT_ONBLUR = 'onblur' EVENT_ONCONTEXTMENU = "oncontextmenu" EVENT_ONUPDATE = 'onupdate' @decorate_constructor_parameter_types([]) def __init__(self, **kwargs): if '_type' not in kwargs: kwargs['_type'] = 'div' super(Widget, self).__init__(**kwargs) self.eventManager = _EventManager() self.oldRootWidget = None self.style['margin'] = kwargs.get('margin', '0px auto') self.set_layout_orientation(kwargs.get('layout_orientation', Widget.LAYOUT_VERTICAL)) self.set_size(kwargs.get('width'), kwargs.get('height')) def set_size(self, width, height): if width is not None: try: width = to_pix(int(width)) except ValueError: pass self.style['width'] = width if height is not None: try: height = to_pix(int(height)) except ValueError: pass self.style['height'] = height def set_layout_orientation(self, layout_orientation): self.layout_orientation = layout_orientation def redraw(self): update_event.set() def repr(self, client, include_children=True): self.attributes['style'] = jsonize(self.style) return super(Widget, self).repr(client, include_children) def append(self, value, key=''): if not isinstance(value, Widget): raise ValueError('value should be a Widget (otherwise use add_child(key,other)') key = str(id(value)) if key == '' else key self.add_child(key, value) if self.layout_orientation == Widget.LAYOUT_HORIZONTAL: if 'float' in self.children[key].style.keys(): if not (self.children[key].style['float'] == 'none'): self.children[key].style['float'] = 'left' else: self.children[key].style['float'] = 'left' return key def onfocus(self): return self.eventManager.propagate(self.EVENT_ONFOCUS, []) @decorate_set_on_listener("onfocus", "(self)") def set_on_focus_listener(self, listener, funcname): self.attributes[self.EVENT_ONFOCUS] = "sendCallback('%s','%s');" "event.stopPropagation();event.preventDefault();" "return false;" % (id(self), self.EVENT_ONFOCUS) self.eventManager.register_listener(self.EVENT_ONFOCUS, listener, funcname) def onblur(self): return self.eventManager.propagate(self.EVENT_ONBLUR, []) @decorate_set_on_listener("onblur", "(self)") def set_on_blur_listener(self, listener, funcname): self.attributes[self.EVENT_ONBLUR] = "sendCallback('%s','%s');" "event.stopPropagation();event.preventDefault();" "return false;" % (id(self), self.EVENT_ONBLUR) self.eventManager.register_listener(self.EVENT_ONBLUR, listener, funcname) def 
show(self, baseAppInstance): self.baseAppInstance = baseAppInstance self.oldRootWidget = self.baseAppInstance.client.root self.baseAppInstance.client.root = self def hide(self): if hasattr(self, 'baseAppInstance'): self.baseAppInstance.client.root = self.oldRootWidget def onclick(self): return self.eventManager.propagate(self.EVENT_ONCLICK, []) @decorate_set_on_listener("onclick", "(self)") def set_on_click_listener(self, listener, funcname): self.attributes[self.EVENT_ONCLICK] = "sendCallback('%s','%s');" "event.stopPropagation();event.preventDefault();" % (id(self), self.EVENT_ONCLICK) self.eventManager.register_listener(self.EVENT_ONCLICK, listener, funcname) def oncontextmenu(self): return self.eventManager.propagate(self.EVENT_ONCONTEXTMENU, []) @decorate_set_on_listener("oncontextmenu", "(self)") def set_on_contextmenu_listener(self, listener, funcname): self.attributes[self.EVENT_ONCONTEXTMENU] = "sendCallback('%s','%s');" "event.stopPropagation();event.preventDefault();" "return false;" % (id(self), self.EVENT_ONCONTEXTMENU) self.eventManager.register_listener(self.EVENT_ONCONTEXTMENU, listener, funcname) def onmousedown(self, x, y): return self.eventManager.propagate(self.EVENT_ONMOUSEDOWN, [x, y]) @decorate_set_on_listener("onmousedown", "(self,x,y)") def set_on_mousedown_listener(self, listener, funcname): self.attributes[self.EVENT_ONMOUSEDOWN] = "var params={};" "params['x']=event.clientX-this.offsetLeft;" "params['y']=event.clientY-this.offsetTop;" "sendCallbackParam('%s','%s',params);" "event.stopPropagation();event.preventDefault();" "return false;" % (id(self), self.EVENT_ONMOUSEDOWN) self.eventManager.register_listener(self.EVENT_ONMOUSEDOWN, listener, funcname) def onmouseup(self, x, y): return self.eventManager.propagate(self.EVENT_ONMOUSEUP, [x, y]) @decorate_set_on_listener("onmouseup", "(self,x,y)") def set_on_mouseup_listener(self, listener, funcname): self.attributes[self.EVENT_ONMOUSEUP] = "var params={};" "params['x']=event.clientX-this.offsetLeft;" "params['y']=event.clientY-this.offsetTop;" "sendCallbackParam('%s','%s',params);" "event.stopPropagation();event.preventDefault();" "return false;" % (id(self), self.EVENT_ONMOUSEUP) self.eventManager.register_listener(self.EVENT_ONMOUSEUP, listener, funcname) def onmouseout(self): return self.eventManager.propagate(self.EVENT_ONMOUSEOUT, []) @decorate_set_on_listener("onmouseout", "(self)") def set_on_mouseout_listener(self, listener, funcname): self.attributes[self.EVENT_ONMOUSEOUT] = "sendCallback('%s','%s');" "event.stopPropagation();event.preventDefault();" "return false;" % (id(self), self.EVENT_ONMOUSEOUT) self.eventManager.register_listener(self.EVENT_ONMOUSEOUT, listener, funcname) def onmouseleave(self): return self.eventManager.propagate(self.EVENT_ONMOUSELEAVE, []) @decorate_set_on_listener("onmouseleave", "(self)") def set_on_mouseleave_listener(self, listener, funcname): self.attributes[self.EVENT_ONMOUSELEAVE] = "sendCallback('%s','%s');" "event.stopPropagation();event.preventDefault();" "return false;" % (id(self), self.EVENT_ONMOUSELEAVE) self.eventManager.register_listener(self.EVENT_ONMOUSELEAVE, listener, funcname) def onmousemove(self, x, y): return self.eventManager.propagate(self.EVENT_ONMOUSEMOVE, [x, y]) @decorate_set_on_listener("onmousemove", "(self,x,y)") def set_on_mousemove_listener(self, listener, funcname): self.attributes[self.EVENT_ONMOUSEMOVE] = "var params={};" "params['x']=event.clientX-this.offsetLeft;" "params['y']=event.clientY-this.offsetTop;" 
"sendCallbackParam('%s','%s',params);" "event.stopPropagation();event.preventDefault();" "return false;" % (id(self), self.EVENT_ONMOUSEMOVE) self.eventManager.register_listener(self.EVENT_ONMOUSEMOVE, listener, funcname) def ontouchmove(self, x, y): return self.eventManager.propagate(self.EVENT_ONTOUCHMOVE, [x, y]) @decorate_set_on_listener("ontouchmove", "(self,x,y)") def set_on_touchmove_listener(self, listener, funcname): self.attributes[self.EVENT_ONTOUCHMOVE] = "var params={};" "params['x']=parseInt(event.changedTouches[0].clientX)-this.offsetLeft;" "params['y']=parseInt(event.changedTouches[0].clientY)-this.offsetTop;" "sendCallbackParam('%s','%s',params);" "event.stopPropagation();event.preventDefault();" "return false;" % (id(self), self.EVENT_ONTOUCHMOVE) self.eventManager.register_listener(self.EVENT_ONTOUCHMOVE, listener, funcname) def ontouchstart(self, x, y): return self.eventManager.propagate(self.EVENT_ONTOUCHSTART, [x, y]) @decorate_set_on_listener("ontouchstart", "(self,x,y)") def set_on_touchstart_listener(self, listener, funcname): self.attributes[self.EVENT_ONTOUCHSTART] = "var params={};" "params['x']=parseInt(event.changedTouches[0].clientX)-this.offsetLeft;" "params['y']=parseInt(event.changedTouches[0].clientY)-this.offsetTop;" "sendCallbackParam('%s','%s',params);" "event.stopPropagation();event.preventDefault();" "return false;" % (id(self), self.EVENT_ONTOUCHSTART) self.eventManager.register_listener(self.EVENT_ONTOUCHSTART, listener, funcname) def ontouchend(self, x, y): return self.eventManager.propagate(self.EVENT_ONTOUCHEND, [x, y]) @decorate_set_on_listener("ontouchend", "(self,x,y)") def set_on_touchend_listener(self, listener, funcname): self.attributes[self.EVENT_ONTOUCHEND] = "var params={};" "params['x']=parseInt(event.changedTouches[0].clientX)-this.offsetLeft;" "params['y']=parseInt(event.changedTouches[0].clientY)-this.offsetTop;" "sendCallbackParam('%s','%s',params);" "event.stopPropagation();event.preventDefault();" "return false;" % (id(self), self.EVENT_ONTOUCHEND) self.eventManager.register_listener(self.EVENT_ONTOUCHEND, listener, funcname) def ontouchenter(self, x, y): return self.eventManager.propagate(self.EVENT_ONTOUCHENTER, [x, y]) @decorate_set_on_listener("ontouchenter", "(self,x,y)") def set_on_touchenter_listener(self, listener, funcname): self.attributes[self.EVENT_ONTOUCHENTER] = "var params={};" "params['x']=parseInt(event.changedTouches[0].clientX)-this.offsetLeft;" "params['y']=parseInt(event.changedTouches[0].clientY)-this.offsetTop;" "sendCallbackParam('%s','%s',params);" "event.stopPropagation();event.preventDefault();" "return false;" % (id(self), self.EVENT_ONTOUCHENTER) self.eventManager.register_listener(self.EVENT_ONTOUCHENTER, listener, funcname) def ontouchleave(self): return self.eventManager.propagate(self.EVENT_ONTOUCHLEAVE, []) @decorate_set_on_listener("ontouchleave", "(self)")
MIT License
azure/azure-devops-cli-extension
azure-devops/azext_devops/devops_sdk/v5_0/client_factory.py
ClientFactoryV5_0.get_upack_packaging_client
python
def get_upack_packaging_client(self): return self._connection.get_client('azure.devops.v5_0.upack_packaging.upack_packaging_client.UPackPackagingClient')
get_upack_packaging_client. Gets the 5.0 version of the UPackPackagingClient :rtype: :class:`<UPackPackagingClient> <azure.devops.v5_0.upack_packaging.upack_packaging_client.UPackPackagingClient>`
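A hedged usage sketch; it assumes an authenticated `connection` object from this SDK already exists (how it is built is not shown here):

factory = ClientFactoryV5_0(connection)
upack_client = factory.get_upack_packaging_client()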
https://github.com/azure/azure-devops-cli-extension/blob/5f33f7d81a9c2d2990044fbd9ffa6b535cbda528/azure-devops/azext_devops/devops_sdk/v5_0/client_factory.py#L340-L345
 class ClientFactoryV5_0(object): def __init__(self, connection): self._connection = connection def get_accounts_client(self): return self._connection.get_client('azure.devops.v5_0.accounts.accounts_client.AccountsClient') def get_boards_client(self): return self._connection.get_client('azure.devops.v5_0.boards.boards_client.BoardsClient') def get_build_client(self): return self._connection.get_client('azure.devops.v5_0.build.build_client.BuildClient') def get_client_trace_client(self): return self._connection.get_client('azure.devops.v5_0.client_trace.client_trace_client.ClientTraceClient') def get_cloud_load_test_client(self): return self._connection.get_client('azure.devops.v5_0.cloud_load_test.cloud_load_test_client.CloudLoadTestClient') def get_contributions_client(self): return self._connection.get_client('azure.devops.v5_0.contributions.contributions_client.ContributionsClient') def get_core_client(self): return self._connection.get_client('azure.devops.v5_0.core.core_client.CoreClient') def get_customer_intelligence_client(self): return self._connection.get_client('azure.devops.v5_0.customer_intelligence.customer_intelligence_client.CustomerIntelligenceClient') def get_dashboard_client(self): return self._connection.get_client('azure.devops.v5_0.dashboard.dashboard_client.DashboardClient') def get_extension_management_client(self): return self._connection.get_client('azure.devops.v5_0.extension_management.extension_management_client.ExtensionManagementClient') def get_feature_availability_client(self): return self._connection.get_client('azure.devops.v5_0.feature_availability.feature_availability_client.FeatureAvailabilityClient') def get_feature_management_client(self): return self._connection.get_client('azure.devops.v5_0.feature_management.feature_management_client.FeatureManagementClient') def get_feed_client(self): return self._connection.get_client('azure.devops.v5_0.feed.feed_client.FeedClient') def get_feed_token_client(self): return self._connection.get_client('azure.devops.v5_0.feed_token.feed_token_client.FeedTokenClient') def get_file_container_client(self): return self._connection.get_client('azure.devops.v5_0.file_container.file_container_client.FileContainerClient') def get_gallery_client(self): return self._connection.get_client('azure.devops.v5_0.gallery.gallery_client.GalleryClient') def get_git_client(self): return self._connection.get_client('azure.devops.v5_0.git.git_client.GitClient') def get_graph_client(self): return self._connection.get_client('azure.devops.v5_0.graph.graph_client.GraphClient') def get_identity_client(self): return self._connection.get_client('azure.devops.v5_0.identity.identity_client.IdentityClient') def get_licensing_client(self): return self._connection.get_client('azure.devops.v5_0.licensing.licensing_client.LicensingClient') def get_location_client(self): return self._connection.get_client('azure.devops.v5_0.location.location_client.LocationClient') def get_maven_client(self): return self._connection.get_client('azure.devops.v5_0.maven.maven_client.MavenClient') def get_member_entitlement_management_client(self): return self._connection.get_client('azure.devops.v5_0.member_entitlement_management.member_entitlement_management_client.MemberEntitlementManagementClient') def get_notification_client(self): return self._connection.get_client('azure.devops.v5_0.notification.notification_client.NotificationClient') def get_npm_client(self): return self._connection.get_client('azure.devops.v5_0.npm.npm_client.NpmClient') def 
get_nuget_client(self): return self._connection.get_client('azure.devops.v5_0.nuget.nuget_client.NuGetClient') def get_operations_client(self): return self._connection.get_client('azure.devops.v5_0.operations.operations_client.OperationsClient') def get_policy_client(self): return self._connection.get_client('azure.devops.v5_0.policy.policy_client.PolicyClient') def get_profile_client(self): return self._connection.get_client('azure.devops.v5_0.profile.profile_client.ProfileClient') def get_project_analysis_client(self): return self._connection.get_client('azure.devops.v5_0.project_analysis.project_analysis_client.ProjectAnalysisClient') def get_provenance_client(self): return self._connection.get_client('azure.devops.v5_0.provenance.provenance_client.ProvenanceClient') def get_py_pi_api_client(self): return self._connection.get_client('azure.devops.v5_0.py_pi_api.py_pi_api_client.PyPiApiClient') def get_release_client(self): return self._connection.get_client('azure.devops.v5_0.release.release_client.ReleaseClient') def get_search_client(self): return self._connection.get_client('azure.devops.v5_0.search.search_client.SearchClient') def get_security_client(self): return self._connection.get_client('azure.devops.v5_0.security.security_client.SecurityClient') def get_service_endpoint_client(self): return self._connection.get_client('azure.devops.v5_0.service_endpoint.service_endpoint_client.ServiceEndpointClient') def get_service_hooks_client(self): return self._connection.get_client('azure.devops.v5_0.service_hooks.service_hooks_client.ServiceHooksClient') def get_settings_client(self): return self._connection.get_client('azure.devops.v5_0.settings.settings_client.SettingsClient') def get_symbol_client(self): return self._connection.get_client('azure.devops.v5_0.symbol.symbol_client.SymbolClient') def get_task_client(self): return self._connection.get_client('azure.devops.v5_0.task.task_client.TaskClient') def get_task_agent_client(self): return self._connection.get_client('azure.devops.v5_0.task_agent.task_agent_client.TaskAgentClient') def get_test_client(self): return self._connection.get_client('azure.devops.v5_0.test.test_client.TestClient') def get_tfvc_client(self): return self._connection.get_client('azure.devops.v5_0.tfvc.tfvc_client.TfvcClient') def get_token_admin_client(self): return self._connection.get_client('azure.devops.v5_0.token_admin.token_admin_client.TokenAdminClient') def get_token_administration_client(self): return self._connection.get_client('azure.devops.v5_0.token_administration.token_administration_client.TokenAdministrationClient') def get_upack_api_client(self): return self._connection.get_client('azure.devops.v5_0.upack_api.upack_api_client.UPackApiClient')
MIT License
nccgroup/depthcharge
python/depthcharge/memory/patch.py
MemoryPatch.from_tuple
python
def from_tuple(cls, src: tuple): src_len = len(src) if src_len == 4: exp = src[2] desc = src[3] elif src_len == 3: exp = src[2] if isinstance(src[2], bytes) else None desc = src[2] if isinstance(src[2], str) else None elif src_len == 2: exp = None desc = None else: err = 'Invalid number of elements ({:d})' raise ValueError(err.format(src_len)) return cls(src[0], src[1], exp, desc)
Create a :py:class:`.MemoryPatch` object from a tuple with the following elements: +-------+-------+----------------------------------------------------------------------+ | Index | Type | Description | +=======+=======+======================================================================+ | 0 | int | Address to apply patch to | +-------+-------+----------------------------------------------------------------------+ | 1 | bytes | Data to write to the target address | +-------+-------+----------------------------------------------------------------------+ | 2 | bytes | Value expected to reside at target address. Optional; may be ``None``| +-------+-------+----------------------------------------------------------------------+ | 3 | str | Description of patch. Optional; may be ``None`` or empty string. | +-------+-------+----------------------------------------------------------------------+
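A small illustration with made-up values, assuming the MemoryPatch class above is in scope; the address, bytes, and description are hypothetical:

# (address, patch bytes, expected original bytes, description)
patch = MemoryPatch.from_tuple((0x87f00000, b'\x00\x00\x00\x00', b'\xde\xad\xbe\xef', 'zero out flag'))
print(patch.description)  # zero out flag @ 0x87f00000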
https://github.com/nccgroup/depthcharge/blob/9b66d1c2a80b9398ac561c83173ebd748aef018d/python/depthcharge/memory/patch.py#L70-L101
class MemoryPatch: def __init__(self, addr: int, value: bytes, expected: bytes = None, desc=''): self._addr = addr self._val = value self._exp = expected self._desc = desc if expected is not None and len(expected) != len(value): err = 'Expected data is {:d} bytes, but patch value is {:d} bytes' raise ValueError(err.format(len(expected), len(value))) @property def address(self) -> int: return self._addr @property def value(self) -> bytes: return self._val @property def expected(self) -> bytes: return self._exp @property def description(self) -> str: suffix = ' @ 0x{:08x}'.format(self._addr) if self._desc is None: return 'Patch' + suffix return self._desc + suffix @classmethod
BSD 3-Clause New or Revised License
arangodb-community/pyarango
pyArango/theExceptions.py
AQLFetchError.__init__
python
def __init__(self, err_message): Exception.__init__(self, err_message)
Error when unable to fetch. Parameters ---------- err_message : str error message.
https://github.com/arangodb-community/pyarango/blob/db758bf6ffab47fee02bec3f960f87065b28bc33/pyArango/theExceptions.py#L196-L205
class pyArangoException(Exception): def __init__(self, message, errors = None): Exception.__init__(self, message) if errors is None: errors = {} self.message = message self.errors = errors def __str__(self): return self.message + ". Errors: " + str(self.errors) class ConnectionError(pyArangoException): def __init__(self, message, URL, statusCode = "", errors = None): if errors is None: errors = {} mes = "%s. URL: %s, status: %s" % (message, URL, statusCode) pyArangoException.__init__(self, mes, errors) class ArangoError(pyArangoException): def __init__(self, errorObject): self.errorNum = errorObject['errorNum'] pyArangoException.__init__(self, errorObject['errorMessage'], errorObject) class CreationError(pyArangoException): def __init__(self, message, errors = None): if errors is None: errors = {} pyArangoException.__init__(self, message, errors) class IndexError(pyArangoException): def __init__(self, message, errors = None): if errors is None: errors = {} pyArangoException.__init__(self, message, errors) class UpdateError(pyArangoException): def __init__(self, message, errors = None): if errors is None: errors = {} pyArangoException.__init__(self, message, errors) class DeletionError(pyArangoException): def __init__(self, message, errors = None): if errors is None: errors = {} pyArangoException.__init__(self, message, errors) class TraversalError(pyArangoException): def __init__(self, message, errors = None): if errors is None: errors = {} pyArangoException.__init__(self, message, errors) class ValidationError(pyArangoException): def __init__(self, message, errors = None): if errors is None: errors = {} pyArangoException.__init__(self, message, errors) class SchemaViolation(pyArangoException): def __init__(self, collection, field, errors = None): if errors is None: errors = {} message = "Collection '%s' does not have a field '%s' in it's schema" % (collection.__name__, field) pyArangoException.__init__(self, message, errors) class InvalidDocument(pyArangoException): def __init__(self, errors): message = "Unsuccesful validation" self.strErrors = [] for k, v in errors.items(): self.strErrors.append("%s -> %s" % (k, v)) self.strErrors = '\n\t'.join(self.strErrors) pyArangoException.__init__(self, message, errors) def __str__(self): strErrors = [] for k, v in self.errors.items(): strErrors.append("%s -> %s" % (k, v)) strErrors = '\n\t'.join(strErrors) return self.message + ":\n\t" + strErrors class SimpleQueryError(pyArangoException): def __init__(self, message, errors = None): if errors is None: errors = {} pyArangoException.__init__(self, message, errors) class BulkOperationError(pyArangoException): def __init__(self, message): self._errors = [] self._errmsgs = [] self._documents = [] pyArangoException.__init__(self, "Batch error - + " + message) def addBulkError(self, error, document): self._errors.append(error) self._errmsgs.append(str(error)) self._documents.append(document) def __str__(self): strErrors = [] i = 0 for errMsg in self._errmsgs: err = "<unknown>" docstr = "<unknown>" try: err = errMsg except: pass try: docstr = self._documents[i] except: pass strErrors.append("\t<%s> -> %s" % (err, docstr)) i+=1 strErrors = '\n\t'.join(strErrors) return self.message + ":\n\t" + strErrors class QueryError(pyArangoException): def __init__(self, message, errors = None): if errors is None: errors = {} pyArangoException.__init__(self, message, errors) class AQLQueryError(pyArangoException): def __init__(self, message, query, errors = None): if errors is None: errors = {} lq = [] for i, ll in 
enumerate(query.split("\n")): lq.append("%s: %s" % (i+1, ll)) lq = '\n'.join(lq) message = "Error in:\n%s.\n->%s" % (lq, message) pyArangoException.__init__(self, message, errors) class CursorError(pyArangoException): def __init__(self, message, cursorId, errors = None): if errors is None: errors = {} message = "Unable to retreive data for cursor %s: %s" % (cursorId, message) pyArangoException.__init__(self, message, errors) class TransactionError(pyArangoException): def __init__(self, message, action, errors = None): if errors is None: errors = {} message = "Error in: %s.\n->%s" % (action, message) pyArangoException.__init__(self, message, errors) class AbstractInstanciationError(Exception): def __init__(self, cls): self.cls = cls self.message = "%s is abstract and is not supposed to be instanciated. Collections my inherit from it" % self.cls.__name__ Exception.__init__(self, self.message) def __str__(self): return self.message class ExportError(pyArangoException): def __init__(self, message, errors = None ): if errors is None: errors = {} pyArangoException.__init__(self, message, errors) class DocumentNotFoundError(pyArangoException): def __init__(self, message, errors = None): if errors is None: errors = {} pyArangoException.__init__(self, message, errors) class AQLFetchError(Exception):
Apache License 2.0
sigsep/open-unmix-pytorch
openunmix/filtering.py
_mul_add
python
def _mul_add(a: torch.Tensor, b: torch.Tensor, out: Optional[torch.Tensor] = None) -> torch.Tensor: target_shape = torch.Size([max(sa, sb) for (sa, sb) in zip(a.shape, b.shape)]) if out is None or out.shape != target_shape: out = torch.zeros(target_shape, dtype=a.dtype, device=a.device) if out is a: real_a = a[..., 0] out[..., 0] = out[..., 0] + (real_a * b[..., 0] - a[..., 1] * b[..., 1]) out[..., 1] = out[..., 1] + (real_a * b[..., 1] + a[..., 1] * b[..., 0]) else: out[..., 0] = out[..., 0] + (a[..., 0] * b[..., 0] - a[..., 1] * b[..., 1]) out[..., 1] = out[..., 1] + (a[..., 0] * b[..., 1] + a[..., 1] * b[..., 0]) return out
Element-wise multiplication of two complex Tensors described through their real and imaginary parts. The result is added to the `out` tensor
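A quick numeric check, assuming _mul_add above is in scope; complex numbers are stored with a trailing dimension of size 2 (real, imaginary):

import torch

a = torch.tensor([[1.0, 2.0]])   # 1 + 2j
b = torch.tensor([[3.0, 4.0]])   # 3 + 4j
out = _mul_add(a, b)             # out starts at zero, so this is just the complex product
print(out)                       # tensor([[-5., 10.]]), i.e. (1+2j)*(3+4j) = -5+10j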
https://github.com/sigsep/open-unmix-pytorch/blob/49e65ac367cc2ab9fa3f6f41dd9dd778223ca67d/openunmix/filtering.py#L50-L66
from typing import Optional import torch import torch.nn as nn from torch import Tensor from torch.utils.data import DataLoader def atan2(y, x): pi = 2 * torch.asin(torch.tensor(1.0)) x += ((x == 0) & (y == 0)) * 1.0 out = torch.atan(y / x) out += ((y >= 0) & (x < 0)) * pi out -= ((y < 0) & (x < 0)) * pi out *= 1 - ((y > 0) & (x == 0)) * 1.0 out += ((y > 0) & (x == 0)) * (pi / 2) out *= 1 - ((y < 0) & (x == 0)) * 1.0 out += ((y < 0) & (x == 0)) * (-pi / 2) return out def _norm(x: torch.Tensor) -> torch.Tensor: return torch.abs(x[..., 0]) ** 2 + torch.abs(x[..., 1]) ** 2
MIT License
zapatacomputing/z-quantum-core
src/python/zquantum/core/bitstring_distribution/distance_measures/mmd.py
compute_rbf_kernel
python
def compute_rbf_kernel(x_i: np.ndarray, y_j: np.ndarray, sigma: float) -> np.ndarray: exponent = np.abs(x_i[:, None] - y_j[None, :]) ** 2 try: gamma = 1.0 / (2 * sigma) except ZeroDivisionError as error: print("Handling run-time error:", error) raise kernel_matrix = np.exp(-gamma * exponent) return kernel_matrix
Compute the gaussian (RBF) kernel matrix K, with K_ij = exp(-gamma |x_i - y_j|^2) and gamma = 1/(2*sigma). Args: x_i: Samples A (integers). y_j: Samples B (integers). sigma: The bandwidth of the gaussian kernel. Returns: np.ndarray: The gaussian kernel matrix.
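A small worked example, assuming compute_rbf_kernel above is in scope; with sigma = 0.5 the kernel reduces to exp(-|x_i - y_j|^2):

import numpy as np

x = np.array([0, 1])
y = np.array([0, 2])
K = compute_rbf_kernel(x, y, sigma=0.5)   # gamma = 1 / (2 * 0.5) = 1
print(np.round(K, 4))
# [[1.     0.0183]
#  [0.3679 0.3679]]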
https://github.com/zapatacomputing/z-quantum-core/blob/5fa4fd5d8682bbae696f8c2c2d386133ccf7f378/src/python/zquantum/core/bitstring_distribution/distance_measures/mmd.py#L34-L53
from typing import TYPE_CHECKING, Dict, List, Sequence, Union import numpy as np if TYPE_CHECKING: from zquantum.core.bitstring_distribution import BitstringDistribution
Apache License 2.0
giacomocerquone/univaqbot
libs/utils.py
db_connection
python
def db_connection(): try: conn = pymongo.MongoClient(os.environ['MONGODB_URI']) print("Connected successfully!") except (pymongo.errors.ConnectionFailure) as err: print("Could not connect to MongoDB: %s" % err) global DATABASE DATABASE = conn.get_default_database()
Get MongoDB connection
https://github.com/giacomocerquone/univaqbot/blob/754519befe24dcc3c4a658160b727de7b14a6b21/libs/utils.py#L33-L43
import logging import os import bs4 import requests import pymongo from telegram import TelegramError DATABASE = "" USERS = { 'telegramID': [], 'disim': [], 'univaq': [], 'discab_general': [], 'discab_biotechnology': [], 'discab_medical':[], 'discab_motor_science': [], 'discab_psychology': [], 'mesva_general': [], 'mesva_medical': [], 'mesva_environmental_science': [], 'mesva_biological_science': [] } NEWS = {}
MIT License
bilylee/siamfc-tensorflow
scripts/build_VID2015_imdb.py
Dataset._get_unique_trackids
python
def _get_unique_trackids(self, video_dir): x_image_paths = glob.glob(video_dir + '/*.crop.x.jpg') trackids = [os.path.basename(path).split('.')[1] for path in x_image_paths] unique_trackids = set(trackids) return unique_trackids
Get unique trackids within video_dir
https://github.com/bilylee/siamfc-tensorflow/blob/f572dca95f2b3b2861f54de467259753428e468c/scripts/build_VID2015_imdb.py#L47-L52
from __future__ import absolute_import from __future__ import division from __future__ import print_function import glob import os import os.path as osp import pickle import sys import numpy as np import tensorflow as tf CURRENT_DIR = osp.dirname(__file__) sys.path.append(osp.join(CURRENT_DIR, '..')) from utils.misc_utils import sort_nicely class Config: dataset_dir = 'data/ILSVRC2015-VID-Curation' save_dir = 'data/' validation_ratio = 0.1 class DataIter: pass class Dataset: def __init__(self, config): self.config = config
MIT License
jefkine/zeta-learn
ztlearn/utils/data_utils.py
minibatches
python
def minibatches(input_data, input_label, batch_size, shuffle): assert input_data.shape[0] == input_label.shape[0], 'input data and label sizes do not match!' minibatches = [] indices = np.arange(input_data.shape[0]) if shuffle: np.random.shuffle(indices) for idx in range(0, input_data.shape[0], batch_size): mini_batch = indices[idx:idx + batch_size] minibatches.append((input_data[mini_batch], input_label[mini_batch])) return minibatches
generate minibatches on a given input data matrix
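An illustration of the batching, assuming the minibatches function above is in scope; 10 samples with batch_size 4 yield batches of 4, 4 and 2:

import numpy as np

data = np.arange(20).reshape(10, 2)
labels = np.arange(10)
batches = minibatches(data, labels, batch_size=4, shuffle=False)
print([batch_data.shape[0] for batch_data, batch_labels in batches])  # [4, 4, 2]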
https://github.com/jefkine/zeta-learn/blob/04388f90093b52f5df2f334c898f3a1224f5a13f/ztlearn/utils/data_utils.py#L98-L111
import os import sys import gzip import urllib import tarfile import zipfile import numpy as np from itertools import chain from itertools import combinations from itertools import combinations_with_replacement def eucledian_norm(vec_a, vec_b): distance = vec_a - vec_b return np.linalg.norm(distance, ord = 'fro', axis = 1) def clip_gradients(grad, g_min = -1., g_max = 1.): return np.clip(grad, g_min, g_max, out = grad) def accuracy_score(predictions, targets): return np.mean(predictions == targets) def one_hot(labels, num_classes = None): num_classes = np.max(labels.astype('int')) + 1 if not num_classes else num_classes one_hot_labels = np.zeros([labels.size, num_classes]) one_hot_labels[np.arange(labels.size), labels.astype('int')] = 1. return one_hot_labels def unhot(one_hot, unhot_axis = 1): return np.argmax(one_hot, axis = unhot_axis) def imbalanced_dataset(input_data, input_label, digit, remainder): input_label_ids = np.squeeze(np.array(np.where(input_label == digit)), axis = 0) remainder_ids = input_label_ids[remainder:] return np.delete(input_data, remainder_ids, 0), np.delete(input_label, remainder_ids, 0) def shuffle_data(input_data, input_label, random_seed = None): assert input_data.shape[0] == input_label.shape[0], 'input data and label sizes do not match!' if random_seed is not None: np.random.seed(random_seed) indices = np.arange(input_data.shape[0]) np.random.shuffle(indices) return input_data[indices], input_label[indices] def train_test_split(samples, labels, test_size = 0.2, shuffle = True, random_seed = None, cut_off = None): if shuffle: samples, labels = shuffle_data(samples, labels, random_seed) split_ratio = int((1.0 - test_size) * len(samples)) samples_train, samples_test = samples[:split_ratio], samples[split_ratio:] labels_train, labels_test = labels[:split_ratio], labels[split_ratio:] if cut_off is not None and isinstance(cut_off, (int, np.integer)): return samples_train[:cut_off], samples_test[:cut_off], labels_train[:cut_off], labels_test[:cut_off] return samples_train, samples_test, labels_train, labels_test def kfold_split(samples, labels, n_splits = 5, shuffle = False, random_seed = None): if shuffle: samples, labels = shuffle_data(samples, labels, random_seed) def get_folds(input_data, num_rows): fold_size = int(num_rows / n_splits) for idx in range(0, num_rows, fold_size): yield input_data[idx:idx + fold_size] sample_folds = list(get_folds(samples, samples.shape[0])) label_folds = list(get_folds(labels, samples.shape[0])) return sample_folds, label_folds
MIT License
openschc/openschc
src/frag_tile.py
TileList.unset_sent_flag
python
def unset_sent_flag(self, win, bit_list): def unset_sent_flag_do(wn, tn): if tn is None: dprint("last tile case") return counter = 0 dprint('unset_sent_flag_do') for t in self.all_tiles: if t["w-num"] == wn: if t["t-num"] == self.max_fcn - tn: counter += 1 dprint('counter = {}, t-num {}, tn {}'.format(counter, t["t-num"],tn)) t["sent"] = False elif t["t-num"] == self.max_fcn: dprint("t-num {} == max_fcn {}".format(t["t-num"],self.max_fcn)) dprint("unset_sent_flag") dprint("bit_list -> {}".format(bit_list)) dprint("self.max_w_num:{} win:{}, len(bit_list[:-1]):{}".format(self.max_w_num, win, len(bit_list[:-1]))) if self.max_w_num == win: dprint("last window") dprint("self.all_tiles -> {}".format(self.all_tiles)) for bi in range(len(bit_list[:-1])): dprint("bi -> {}".format(bi)) if bit_list[bi] == 0: unset_sent_flag_do(win, bi) if bit_list[-1] == 1: dprint("Problem in tx, the last bit is set as 1") dprint("self.all_tiles -> {}".format(self.all_tiles)) self.all_tiles[-1]["sent"] = True else: dprint("not last window") for bi in range(len(bit_list)): if bit_list[bi] == 0: unset_sent_flag_do(win, bi) dprint("self.all_tiles -> {}".format(self.all_tiles))
Set the sent flag from True back to False.
https://github.com/openschc/openschc/blob/7b0c165a27936d8f2732a90844a00c5ade23eea5/src/frag_tile.py#L108-L161
from gen_base_import import * from gen_utils import dprint import frag_msg from compr_core import * class TileList(): def __init__(self, rule, packet_bbuf, l2word=8): self.rule = rule self.t_size = rule[T_FRAG][T_FRAG_PROF][T_FRAG_TILE] assert self.t_size >= l2word self.max_fcn = frag_msg.get_max_fcn(rule) self.all_tiles = [] w_num = 0 t_num = self.max_fcn bbuf = packet_bbuf.copy() bbuf_bits_size = bbuf.count_added_bits() nb_full_size_tiles, last_tile_size = ( bbuf_bits_size // self.t_size, bbuf_bits_size % self.t_size) if last_tile_size == 0: tiles = [ bbuf.get_bits_as_buffer(self.t_size) for _ in range(nb_full_size_tiles) ] elif last_tile_size >= l2word: tiles = [ bbuf.get_bits_as_buffer(self.t_size) for _ in range(nb_full_size_tiles) ] tiles.append(bbuf.get_bits_as_buffer(last_tile_size)) else: if nb_full_size_tiles >= 1: tiles = [ bbuf.get_bits_as_buffer(self.t_size) for _ in range(nb_full_size_tiles-1) ] tiles.append(bbuf.get_bits_as_buffer(self.t_size-l2word)) tiles.append(bbuf.get_bits_as_buffer(last_tile_size+l2word)) else: tiles.append(bbuf.get_bits_as_buffer(last_tile_size)) for t in tiles: tile_obj = { "w-num": w_num, "t-num": t_num, "tile": t, "sent": False, } self.all_tiles.append(tile_obj) if t_num == 0: t_num = self.max_fcn w_num += 1 else: t_num -= 1 if frag_msg.get_win_all_1(rule) < w_num: raise ValueError( "ERROR: the packet size > WSize. {} > {}".format( w_num, frag_msg.get_win_all_1(rule))) self.max_w_num = w_num def get_tiles(self, mtu_size): remaining_size = mtu_size - frag_msg.get_sender_header_size(self.rule) max_tiles = remaining_size // self.t_size tiles = [] t_prev = None for i in range(len(self.all_tiles)): t = self.all_tiles[i] if t["sent"] == False: tiles.append(t) t["sent"] = True t_prev = t if len(tiles) == max_tiles: break if len(tiles) == 0: return None, 0, remaining_size nb_remaining_tiles = len( [ _ for _ in self.all_tiles if _["sent"] == False ]) remaining_size -= self.get_tile_size(tiles) return tiles, nb_remaining_tiles, remaining_size def get_all_tiles(self): return self.all_tiles
MIT License
ebay/accelerator
accelerator/extras.py
json_encode
python
def json_encode(variable, sort_keys=True, as_str=False): if sort_keys: dict_type = dict else: dict_type = OrderedDict def typefix(e): if isinstance(e, dict): return dict_type((typefix(k), typefix(v)) for k, v in iteritems(e)) elif isinstance(e, (list, tuple, set,)): return [typefix(v) for v in e] elif PY2 and isinstance(e, bytes): return uni(e) else: return e variable = typefix(variable) res = json.dumps(variable, indent=4, sort_keys=sort_keys) if PY3 and not as_str: res = res.encode('ascii') return res
Return variable serialised as json bytes (or str with as_str=True). You can pass tuples and sets (saved as lists). On py2 you can also pass bytes that will be passed through compat.uni. If you set sort_keys=False you can use OrderedDict to get whatever order you like.
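A short usage sketch of the documented behaviour, assuming json_encode above is importable; tuples and sets come out as JSON lists:

blob = json_encode({'values': (1, 2, 3), 'tags': {'a'}}, as_str=True)
print(blob)  # pretty-printed JSON with sorted keys: {"tags": ["a"], "values": [1, 2, 3]}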
https://github.com/ebay/accelerator/blob/4c053465b893e8ece354c26953fd168a36edccc1/accelerator/extras.py#L107-L133
from __future__ import print_function from __future__ import division import os import datetime import json from traceback import print_exc from collections import OrderedDict import sys from accelerator.compat import PY2, PY3, pickle, izip, iteritems, first_value from accelerator.compat import num_types, uni, unicode, str_types from accelerator.job import Job, JobWithFile from accelerator.statmsg import status def _fn(filename, jobid, sliceno): if filename.startswith('/'): assert not jobid, "Don't specify full path (%r) and jobid (%s)." % (filename, jobid,) assert not sliceno, "Don't specify full path (%r) and sliceno." % (filename,) elif jobid: filename = Job(jobid).filename(filename, sliceno) elif sliceno is not None: filename = '%s.%d' % (filename, sliceno,) return filename def _typelistnone(v, t): if isinstance(v, list): return [t(v) if v else None for v in v] elif v: return t(v) else: return None def _job_params(jobid): from accelerator.setupfile import load_setup d = load_setup(jobid) _apply_typing(d.options, d.get('_typing', ())) return d def job_params(jobid=None, default_empty=False): if default_empty and not jobid: return DotDict( options=DotDict(), datasets=DotDict(), jobs=DotDict(), ) from accelerator.dataset import Dataset from accelerator.job import Job d = _job_params(jobid) d.datasets = DotDict({k: _typelistnone(v, Dataset) for k, v in d.datasets.items()}) d.jobs = DotDict({k: _typelistnone(v, Job) for k, v in d.jobs.items()}) d.jobid = Job(d.jobid) return d def job_post(jobid): job = Job(jobid) d = job.json_load('post.json') version = d.get('version', 0) if version == 0: prefix = job.path + '/' d.files = sorted(fn[len(prefix):] if fn.startswith(prefix) else fn for fn in d.files) version = 1 if version != 1: raise Exception("Don't know how to load post.json version %d (in %s)" % (d.version, jobid,)) return d def pickle_save(variable, filename='result.pickle', sliceno=None, temp=None, _hidden=False): filename = _fn(filename, None, sliceno) with FileWriteMove(filename, temp, _hidden=_hidden) as fh: pickle.dump(variable, fh, 2) def pickle_load(filename='result.pickle', jobid=None, sliceno=None, encoding='bytes'): filename = _fn(filename, jobid, sliceno) with status('Loading ' + filename): with open(filename, 'rb') as fh: if PY3: return pickle.load(fh, encoding=encoding) else: return pickle.load(fh)
Apache License 2.0
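A minimal, standard-library-only sketch of the key-ordering behaviour that the json_encode docstring above describes; the sample dict and variable names are illustrative and not part of the accelerator code.

import json
from collections import OrderedDict

data = OrderedDict([("zebra", 1), ("apple", 2)])

# sort_keys=True ignores the OrderedDict ordering and sorts keys alphabetically
print(json.dumps(data, indent=4, sort_keys=True))   # "apple" comes first

# sort_keys=False keeps whatever order the OrderedDict defines
print(json.dumps(data, indent=4, sort_keys=False))  # "zebra" comes first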
databiosphere/toil
src/toil/lib/bioio.py
system
python
def system(command):
    logger.warning('Deprecated toil method that will be moved/replaced in a future release.')
    logger.debug(f'Running: {command}')
    subprocess.check_call(command, shell=isinstance(command, str), bufsize=-1)
A convenience wrapper around subprocess.check_call that logs the command before passing it on. The command can be either a string or a sequence of strings. If it is a string, shell=True will be passed to subprocess.check_call. :type command: str | sequence[string]
https://github.com/databiosphere/toil/blob/eb2ae8365ae2ebdd50132570b20f7d480eb40cac/src/toil/lib/bioio.py#L25-L34
import logging import subprocess from toil.statsAndLogging import (logger, root_logger, set_logging_from_options) from toil.test import get_temp_file
Apache License 2.0
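For context, a small standalone sketch of the same convenience pattern: subprocess.check_call with shell=True only when the command is given as a single string. The logger name and the echo commands are illustrative, and a POSIX shell is assumed.

import logging
import subprocess

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)

def run(command):
    # Log the command, then delegate to check_call; strings go through the shell
    logger.debug('Running: %s', command)
    subprocess.check_call(command, shell=isinstance(command, str))

run('echo hello from a shell string')        # string  -> shell=True
run(['echo', 'hello', 'from', 'a', 'list'])  # sequence -> shell=False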
dynatrace-oss/api-client-python
dynatrace/environment_v1/deployment.py
DeploymentService.get_gateway_installer_connection_info
python
def get_gateway_installer_connection_info(self, network_zone: Optional[str] = "default") -> "ActiveGateConnectionInfo":
    params = {"networkZone": network_zone}
    response = self.__http_client.make_request(path=f"{self.ENDPOINT_INSTALLER_GATEWAY}/connectioninfo", params=params)
    return ActiveGateConnectionInfo(raw_element=response.json())
Gets the connectivity information for Environment ActiveGate. :param network_zone: The network zone you want the result to be configured with. :returns ActiveGateConnectionInfo: connectivity information
https://github.com/dynatrace-oss/api-client-python/blob/7749125ab384d36e9a00d5d8dc5964cce4d46f66/dynatrace/environment_v1/deployment.py#L178-L188
from typing import Optional, Dict, List, Any from requests import Response from dynatrace.dynatrace_object import DynatraceObject from dynatrace.http_client import HttpClient class DeploymentService: ENDPOINT_INSTALLER_AGENT = "/api/v1/deployment/installer/agent" ENDPOINT_INSTALLER_GATEWAY = "/api/v1/deployment/installer/gateway" ENDPOINT_BOSHRELEASE = "/api/v1/deployment/boshrelease" ENDPOINT_LAMBDA = "/api/v1/deployment/lambda/agent/latest" ENDPOINT_ORCHESTRATION = "/api/v1/deployment/orchestration/agent" def __init__(self, http_client: HttpClient): self.__http_client = http_client def get_agent_installer_latest_metainfo( self, os_type: str, installer_type: str, flavor: Optional[str] = None, arch: Optional[str] = None, bitness: Optional[str] = None ) -> "InstallerMetaInfoDto": params = {"flavor": flavor, "arch": arch, "bitness": bitness} response = self.__http_client.make_request(path=f"{self.ENDPOINT_INSTALLER_AGENT}/{os_type}/{installer_type}/latest/metainfo", params=params) return InstallerMetaInfoDto(raw_element=response.json()) def get_agent_installer( self, os_type: str, installer_type: str, version: str = "latest", flavor: Optional[str] = None, arch: Optional[str] = None, bitness: Optional[str] = None, include: Optional[List[str]] = None, skip_metadata: Optional[bool] = None, network_zone: Optional[str] = None, if_none_match: Optional[str] = None, ) -> "Response": if version != "latest": version = "version/" + version params = { "flavor": flavor, "arch": arch, "bitness": bitness, "include": "&include=".join(include) if include else None, "skipMetadata": skip_metadata, "networkZone": network_zone, } headers = {"If-None-Match": if_none_match} if if_none_match else None return self.__http_client.make_request(path=f"{self.ENDPOINT_INSTALLER_AGENT}/{os_type}/{installer_type}/{version}", params=params, headers=headers) def get_agent_installer_connection_info(self, network_zone: Optional[str] = "default", version: Optional[str] = None) -> "ConnectionInfo": params = {"networkZone": network_zone, "version": version} response = self.__http_client.make_request(path=f"{self.ENDPOINT_INSTALLER_AGENT}/connectioninfo", params=params) return ConnectionInfo(raw_element=response.json()) def get_agent_installer_connection_endpoints(self, network_zone: Optional[str] = "default") -> str: params = {"networkZone": network_zone} return self.__http_client.make_request(path=f"{self.ENDPOINT_INSTALLER_AGENT}/connectioninfo/endpoints", params=params).text def list_agent_installer_versions( self, os_type: str, installer_type: str, flavor: Optional[str] = None, arch: Optional[str] = None ) -> "AgentInstallerVersions": params = {"flavor": flavor, "arch": arch} response = self.__http_client.make_request(path=f"{self.ENDPOINT_INSTALLER_AGENT}/versions/{os_type}/{installer_type}", params=params) return AgentInstallerVersions(raw_element=response.json())
Apache License 2.0
caktus/django-sticky-uploads
stickyuploads/views.py
UploadView.post
python
def post(self, *args, **kwargs):
    if self.upload_allowed():
        form = self.get_upload_form()
        result = {}
        if form.is_valid():
            storage = self.get_storage()
            result['is_valid'] = True
            info = form.stash(storage, self.request.path)
            result.update(info)
        else:
            result.update({
                'is_valid': False,
                'errors': form.errors,
            })
        return HttpResponse(json.dumps(result), content_type='application/json')
    else:
        return HttpResponseForbidden()
Save file and return saved info or report errors.
https://github.com/caktus/django-sticky-uploads/blob/a57539655ba991f63f31f0a5c98d790947bcd1b8/stickyuploads/views.py#L18-L35
from __future__ import unicode_literals import json from django.core.files.storage import get_storage_class from django.http import HttpResponse, HttpResponseForbidden from django.views.generic import View from .forms import UploadForm class UploadView(View): form_class = UploadForm storage_class = 'stickyuploads.storage.TempFileSystemStorage'
BSD 3-Clause New or Revised License
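A minimal sketch of the two JSON payload shapes the post() method above can return. It uses only the standard library, and the extra fields in the success payload are illustrative stand-ins for whatever form.stash() returns, not the library's actual keys.

import json

# Successful upload: is_valid plus the info returned by form.stash()
success = {'is_valid': True, 'stored_name': 'abc123.pdf'}   # 'stored_name' is a made-up example key

# Failed validation: is_valid False plus the form errors
failure = {'is_valid': False, 'errors': {'upload': ['This field is required.']}}

for payload in (success, failure):
    print(json.dumps(payload))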
jmcarp/flask-apispec
flask_apispec/extension.py
FlaskApiSpec._register
python
def _register(self, target, endpoint=None, blueprint=None,
              resource_class_args=None, resource_class_kwargs=None):
    if isinstance(target, types.FunctionType):
        paths = self.view_converter.convert(target, endpoint, blueprint)
    elif isinstance(target, ResourceMeta):
        paths = self.resource_converter.convert(
            target,
            endpoint,
            blueprint,
            resource_class_args=resource_class_args,
            resource_class_kwargs=resource_class_kwargs,
        )
    else:
        raise TypeError()
    for path in paths:
        self.spec.path(**path)
Register a view.

:param target: view function or view class.
:param endpoint: (optional) endpoint name.
:param blueprint: (optional) blueprint name.
:param tuple resource_class_args: (optional) args to be forwarded to the view class constructor.
:param dict resource_class_kwargs: (optional) kwargs to be forwarded to the view class constructor.
https://github.com/jmcarp/flask-apispec/blob/de6f5adbcf3e6fce14aa1e1288ac2e401fd9ca35/flask_apispec/extension.py#L127-L152
import flask import functools import types from apispec import APISpec from apispec.ext.marshmallow import MarshmallowPlugin from flask_apispec import ResourceMeta from flask_apispec.apidoc import ViewConverter, ResourceConverter class FlaskApiSpec: def __init__(self, app=None, document_options=True): self._deferred = [] self.app = app self.view_converter = None self.resource_converter = None self.spec = None self.document_options = document_options if app: self.init_app(app) def init_app(self, app): self.app = app self.spec = self.app.config.get('APISPEC_SPEC') or make_apispec(self.app.config.get('APISPEC_TITLE', 'flask-apispec'), self.app.config.get('APISPEC_VERSION', 'v1'), self.app.config.get('APISPEC_OAS_VERSION', '2.0')) self.add_swagger_routes() self.resource_converter = ResourceConverter(self.app, self.spec, self.document_options) self.view_converter = ViewConverter(self.app, self.spec, self.document_options) for deferred in self._deferred: deferred() def _defer(self, callable, *args, **kwargs): bound = functools.partial(callable, *args, **kwargs) self._deferred.append(bound) if self.app: bound() def add_swagger_routes(self): blueprint = flask.Blueprint( 'flask-apispec', __name__, static_folder='./static', template_folder='./templates', static_url_path='/flask-apispec/static', ) json_url = self.app.config.get('APISPEC_SWAGGER_URL', '/swagger/') if json_url: blueprint.add_url_rule(json_url, 'swagger-json', self.swagger_json) ui_url = self.app.config.get('APISPEC_SWAGGER_UI_URL', '/swagger-ui/') if ui_url: blueprint.add_url_rule(ui_url, 'swagger-ui', self.swagger_ui) self.app.register_blueprint(blueprint) def swagger_json(self): return flask.jsonify(self.spec.to_dict()) def swagger_ui(self): return flask.render_template('swagger-ui.html') def register_existing_resources(self): for name, rule in self.app.view_functions.items(): try: blueprint_name, _ = name.split('.') except ValueError: blueprint_name = None try: self.register(rule, blueprint=blueprint_name) except TypeError: pass def register(self, target, endpoint=None, blueprint=None, resource_class_args=None, resource_class_kwargs=None): self._defer(self._register, target, endpoint, blueprint, resource_class_args, resource_class_kwargs)
MIT License
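A hedged usage sketch for the record above: the public register() shown in the context defers to _register(), so registering a plain Flask view function might look roughly like this. The route, view name, and pet payload are illustrative, not part of the flask-apispec sources.

from flask import Flask, jsonify
from flask_apispec.extension import FlaskApiSpec

app = Flask(__name__)

@app.route('/pets/<int:pet_id>')
def get_pet(pet_id):
    # Ordinary Flask view; flask-apispec converts it into OpenAPI paths
    return jsonify({'id': pet_id})

docs = FlaskApiSpec(app)
docs.register(get_pet)              # internally defers to _register(target, ...)
print(docs.spec.to_dict()['paths'])  # the generated path entries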
peoplepower/botlab
com.ppc.Lesson6-DataStreams/intelligence/lesson6/location_datastream_microservice.py
LocationDataStreamMicroservice.datastream_updated
python
def datastream_updated(self, botengine, address, content):
    if hasattr(self, address):
        getattr(self, address)(botengine, content)
Data Stream Message Received :param botengine: BotEngine environment :param address: Data Stream address :param content: Content of the message
https://github.com/peoplepower/botlab/blob/21cc90c558a17b7ef4a42bca247b437d2f968dc0/com.ppc.Lesson6-DataStreams/intelligence/lesson6/location_datastream_microservice.py#L123-L132
from intelligence.intelligence import Intelligence from devices.light.light import LightDevice class LocationDataStreamMicroservice(Intelligence): def __init__(self, botengine, parent): Intelligence.__init__(self, botengine, parent) self.is_present = self.parent.is_present(botengine) def initialize(self, botengine): return def destroy(self, botengine): return def get_html_summary(self, botengine, oldest_timestamp_ms, newest_timestamp_ms, test_mode=False): return "" def mode_updated(self, botengine, current_mode): if not self.parent.is_present(botengine) and self.is_present: self.is_present = False botengine.get_logger().info("location_datastream_microservice : Sending an internal data stream message to all listeners to turn everything off!") self.parent.distribute_datastream_message(botengine, "toggle_everything", {"on": False}, internal=True, external=False) elif self.parent.is_present(botengine) and not self.is_present: self.is_present = True botengine.get_logger().info("location_datastream_microservice : Sending an internal data stream message to all listeners to turn everything on!") self.parent.distribute_datastream_message(botengine, "toggle_everything", {"on": True}, internal=True, external=False) def device_measurements_updated(self, botengine, device_object): return def device_metadata_updated(self, botengine, device_object): return def device_alert(self, botengine, device_object, alert_type, alert_params): return def device_deleted(self, botengine, device_object): return def question_answered(self, botengine, question): return
Apache License 2.0
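The datastream_updated() method above relies on a common hasattr/getattr dispatch idiom: the data stream address doubles as a method name. A tiny standalone illustration (the class name is made up; 'toggle_everything' is the address used elsewhere in the record's context):

class Dispatcher:
    def toggle_everything(self, content):
        print('toggle_everything called with', content)

    def dispatch(self, address, content):
        # Only call a handler if a method with that exact name exists
        if hasattr(self, address):
            getattr(self, address)(content)

d = Dispatcher()
d.dispatch('toggle_everything', {'on': False})  # handled
d.dispatch('unknown_address', {})               # silently ignored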
crespo-otero-group/fromage
fromage/scripts/fro_assign_charges.py
charged_kinds
python
def charged_kinds(in_atoms, in_kinds):
    q_kinds = []
    for kind in in_kinds:
        charges = []
        for atom in in_atoms:
            if atom.kind == kind:
                charges.append(atom.q)
        if charges:
            avg_charge = sum(charges) / float(len(charges))
        else:
            avg_charge = 0
        q_kinds.append((avg_charge, kind))
    return q_kinds
Get charged atom kinds from charged atoms and kinds.

For each kind of atom to be charged, goes through the list of atoms and
makes an average of the partial atomic charge of atoms of that type.

Parameters
----------
in_atoms : Mol object
    The atoms should be charged and some of them at least should be of the
    relevant kind
in_kinds : list of tuples
    The tuples are of the form (a,b) where a is an element string (like 'C')
    and b is a frozenset of ((element string,order of connection),amount of
    connections). (a,b) is known as an atom kind

Returns
-------
q_kinds : list of tuples
    Each tuple is now (average charge,kind). This tuple is known as a
    charged kind
https://github.com/crespo-otero-group/fromage/blob/9b4a80698ed1672268dde292d5512c72a23cb00a/fromage/scripts/fro_assign_charges.py#L119-L154
import numpy as np import sys import argparse import fromage.io.read_file as rf def detect_1_connect(in_atoms): nat_mol = len(in_atoms) cnct = np.zeros((nat_mol, nat_mol),dtype=int) for i, i_atom in enumerate(in_atoms): for j, j_atom in enumerate(in_atoms): if np.count_nonzero(in_atoms.vectors) == 0: if in_atoms.bonded(i_atom, j_atom): cnct[i][j] = 1 else: if in_atoms.per_bonded(i_atom, j_atom): cnct[i][j] = 1 return cnct def expand_connect(in_mat): out_mat = np.copy(in_mat) for i, row in enumerate(in_mat): dangles = [] connectors = [] for j, element in enumerate(row): if element == 0 and j > i: dangles.append(j) elif element != 0: connectors.append(j) for dangle in dangles: orders = [] for k, dangle_element in enumerate(in_mat[dangle]): if dangle_element != 0 and k in connectors: orders.append(dangle_element + in_mat[i][k]) if orders: out_mat[i][dangle] = min(orders) out_mat[dangle][i] = min(orders) return out_mat def complete_expand(in_mat): mat = np.copy(in_mat) i = 1 while True: i += 1 temp_mat = expand_connect(mat) if np.array_equal(mat, temp_mat): break mat = np.copy(temp_mat) return mat def get_connectivity_mat(in_mol): first_connect = detect_1_connect(in_mol) connect_mat = complete_expand(first_connect) return connect_mat
MIT License
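A self-contained toy run of the averaging logic described in the charged_kinds docstring above, with a minimal stand-in Atom type instead of fromage's Mol object; the element kinds and charges are invented for the illustration.

from collections import namedtuple

Atom = namedtuple('Atom', ['kind', 'q'])

atoms = [Atom('C', -0.10), Atom('C', -0.30), Atom('H', 0.20)]
kinds = ['C', 'H', 'O']

q_kinds = []
for kind in kinds:
    charges = [a.q for a in atoms if a.kind == kind]
    avg = sum(charges) / float(len(charges)) if charges else 0
    q_kinds.append((avg, kind))

print(q_kinds)  # roughly [(-0.2, 'C'), (0.2, 'H'), (0, 'O')]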
jest-community/jest-pytest
src/__tests__/integration/home-assistant/homeassistant/components/switch/telnet.py
TelnetSwitch.turn_off
python
def turn_off(self, **kwargs):
    self._telnet_command(self._command_off)
    if self.assumed_state:
        self._state = False
Turn the device off.
https://github.com/jest-community/jest-pytest/blob/b197b0b31e3ca5c411202d97583cbd2d2b0b92e9/src/__tests__/integration/home-assistant/homeassistant/components/switch/telnet.py#L140-L144
from datetime import timedelta import logging import telnetlib import voluptuous as vol from homeassistant.components.switch import ( ENTITY_ID_FORMAT, PLATFORM_SCHEMA, SwitchDevice) from homeassistant.const import ( CONF_COMMAND_OFF, CONF_COMMAND_ON, CONF_COMMAND_STATE, CONF_NAME, CONF_PORT, CONF_RESOURCE, CONF_SWITCHES, CONF_VALUE_TEMPLATE) import homeassistant.helpers.config_validation as cv _LOGGER = logging.getLogger(__name__) DEFAULT_PORT = 23 SWITCH_SCHEMA = vol.Schema({ vol.Required(CONF_COMMAND_OFF): cv.string, vol.Required(CONF_COMMAND_ON): cv.string, vol.Required(CONF_RESOURCE): cv.string, vol.Optional(CONF_VALUE_TEMPLATE): cv.template, vol.Optional(CONF_COMMAND_STATE): cv.string, vol.Optional(CONF_NAME): cv.string, vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port, }) PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({ vol.Required(CONF_SWITCHES): vol.Schema({cv.slug: SWITCH_SCHEMA}), }) SCAN_INTERVAL = timedelta(seconds=10) def setup_platform(hass, config, add_devices, discovery_info=None): devices = config.get(CONF_SWITCHES, {}) switches = [] for object_id, device_config in devices.items(): value_template = device_config.get(CONF_VALUE_TEMPLATE) if value_template is not None: value_template.hass = hass switches.append( TelnetSwitch( hass, object_id, device_config.get(CONF_RESOURCE), device_config.get(CONF_PORT), device_config.get(CONF_NAME, object_id), device_config.get(CONF_COMMAND_ON), device_config.get(CONF_COMMAND_OFF), device_config.get(CONF_COMMAND_STATE), value_template ) ) if not switches: _LOGGER.error("No switches added") return add_devices(switches) class TelnetSwitch(SwitchDevice): def __init__(self, hass, object_id, resource, port, friendly_name, command_on, command_off, command_state, value_template): self._hass = hass self.entity_id = ENTITY_ID_FORMAT.format(object_id) self._resource = resource self._port = port self._name = friendly_name self._state = False self._command_on = command_on self._command_off = command_off self._command_state = command_state self._value_template = value_template def _telnet_command(self, command): try: telnet = telnetlib.Telnet(self._resource, self._port) telnet.write(command.encode('ASCII') + b'\r') response = telnet.read_until(b'\r', timeout=0.2) return response.decode('ASCII').strip() except IOError as error: _LOGGER.error( 'Command "%s" failed with exception: %s', command, repr(error)) return None @property def name(self): return self._name @property def should_poll(self): return self._command_state is not None @property def is_on(self): return self._state @property def assumed_state(self): return self._command_state is None def update(self): response = self._telnet_command(self._command_state) if response: rendered = self._value_template .render_with_possible_json_value(response) self._state = rendered == "True" else: _LOGGER.warning( "Empty response for command: %s", self._command_state) def turn_on(self, **kwargs): self._telnet_command(self._command_on) if self.assumed_state: self._state = True
MIT License
borda/pyimsegm
experiments_ovary_centres/run_center_prediction.py
main
python
def main(params):
    params = run_train.prepare_experiment_folder(params, FOLDER_EXPERIMENT)
    tl_expt.set_experiment_logger(params['path_expt'])
    logging.info('COMPUTER: \n%r', platform.uname())
    logging.info(tl_expt.string_dict(params, desc='PARAMETERS'))
    tl_expt.create_subfolders(params['path_expt'], LIST_SUBFOLDER)
    path_csv = os.path.join(params['path_expt'], NAME_CSV_TRIPLES)
    df_paths = get_csv_triplets(
        params['path_list'], path_csv, params['path_images'], params['path_segms'], force_reload=FORCE_RERUN
    )
    dict_classif = seg_clf.load_classifier(params['path_classif'])
    params_clf = dict_classif['params']
    params_clf.update(params)
    logging.info(tl_expt.string_dict(params, desc='UPDATED PARAMETERS'))
    df_stat = pd.DataFrame()
    _wrapper_detection = partial(
        load_compute_detect_centers,
        params=params_clf,
        path_classif=params['path_classif'],
        path_output=params['path_expt'],
    )
    iterate = tl_expt.WrapExecuteSequence(_wrapper_detection, df_paths.iterrows(), nb_workers=params['nb_workers'])
    for dict_center in iterate:
        df_stat = df_stat.append(dict_center, ignore_index=True)
        df_stat.to_csv(os.path.join(params['path_expt'], NAME_CSV_TRIPLES_TEMP))
    df_stat.set_index(['image'], inplace=True)
    df_stat.to_csv(os.path.join(params['path_expt'], NAME_CSV_TRIPLES))
    logging.info('STATISTIC: \n %r', df_stat.describe())
PIPELINE for new detections :param dict(str,str) params:
https://github.com/borda/pyimsegm/blob/7463cfc7aad8781564dc84c8780f291cc3c17fe3/experiments_ovary_centres/run_center_prediction.py#L122-L161
import gc import logging import os import platform import sys import time from functools import partial import pandas as pd sys.path += [os.path.abspath('.'), os.path.abspath('..')] import run_center_candidate_training as run_train import run_center_clustering as run_clust import imsegm.classification as seg_clf import imsegm.utilities.data_io as tl_data import imsegm.utilities.experiments as tl_expt FORCE_RERUN = False NAME_CSV_TRIPLES = run_train.NAME_CSV_TRIPLES NAME_CSV_TRIPLES_TEMP = os.path.splitext(NAME_CSV_TRIPLES)[0] + '__TEMP.csv' FOLDER_INPUTS = 'inputs' FOLDER_POINTS = run_train.FOLDER_POINTS FOLDER_POINTS_VISU = run_train.FOLDER_POINTS_VISU FOLDER_CENTRE = run_clust.FOLDER_CENTER FOLDER_CLUSTER_VISUAL = run_clust.FOLDER_CLUSTER_VISUAL LIST_SUBFOLDER = [FOLDER_INPUTS, FOLDER_POINTS, FOLDER_POINTS_VISU, FOLDER_CENTRE, FOLDER_CLUSTER_VISUAL] FOLDER_EXPERIMENT = 'detect-centers-predict_%s' DEFAULT_PARAMS = run_train.CENTER_PARAMS DEFAULT_PARAMS.update(run_clust.CLUSTER_PARAMS) DEFAULT_PARAMS['path_centers'] = os.path.join( DEFAULT_PARAMS['path_output'], run_train.FOLDER_EXPERIMENT % DEFAULT_PARAMS['name'], 'classifier_RandForest.pkl' ) def load_compute_detect_centers(idx_row, params, classif=None, path_classif='', path_output=''): _, row = idx_row dict_center = dict(row) if not classif: dict_classif = seg_clf.load_classifier(path_classif) classif = dict_classif['clf_pipeline'] try: path_show_in = os.path.join(path_output, FOLDER_INPUTS) name, img, segm, _ = run_train.load_image_segm_center((None, row), path_show_in, params['dict_relabel']) t_start = time.time() _, slic, points, features, _ = run_train.estim_points_compute_features(name, img, segm, params) dict_detect = run_train.detect_center_candidates( name, img, segm, None, slic, points, features, params, path_output, classif ) dict_detect['time elapsed'] = time.time() - t_start dict_center.update(dict_detect) dict_center = run_clust.cluster_points_draw_export(dict_center, params, path_output) except Exception: logging.exception('load_compute_detect_centers') gc.collect() time.sleep(1) return dict_center def get_csv_triplets(path_csv, path_csv_out, path_imgs, path_segs, path_centers=None, force_reload=False): if os.path.isfile(path_csv): logging.info('loading path pairs "%s"', path_csv) df_paths = pd.read_csv(path_csv, index_col=0) df_paths['image'] = df_paths['path_image'].apply(lambda x: os.path.splitext(os.path.basename(x))[0]) df_paths.set_index('image', inplace=True) elif os.path.isfile(path_csv_out) and not force_reload: logging.info('loading path pairs "%s"', path_csv_out) df_paths = pd.read_csv(path_csv_out, index_col=0) else: logging.info('estimating own triples') df_paths = run_train.find_match_images_segms_centers(path_imgs, path_segs, path_centers) df_paths['image'] = df_paths['path_image'].apply(lambda x: os.path.splitext(os.path.basename(x))[0]) df_paths.set_index('image', inplace=True) for col in (c for c in df_paths.columns if c.startswith('path_')): df_paths[col] = df_paths[col].apply(tl_data.update_path) df_paths.to_csv(path_csv_out) return df_paths
BSD 3-Clause New or Revised License
theirc/cts
shipments/tasks.py
delete_shipment
python
def delete_shipment(shipment_id):
    try:
        try:
            shipment = Shipment.objects.get(pk=shipment_id)
        except Shipment.DoesNotExist:
            logger.error("In delete_shipment task, no shipment with id %s" % shipment_id)
        else:
            shipment.fast_delete()
    except Exception:
        logger.exception("Unexpected error in delete_shipment")
Task to delete a shipment, because it can take more than 60 seconds.
https://github.com/theirc/cts/blob/43eb3e3b78c19f9e1dc02158ca12fc0c5d6bb270/shipments/tasks.py#L10-L22
import logging from celery.task import task from shipments.models import Shipment logger = logging.getLogger(__name__) @task
BSD 3-Clause New or Revised License
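The task above wraps the lookup in Python's try/except/else form so that fast_delete() only runs when the lookup succeeded, while any other failure is logged instead of crashing the worker. A stripped-down, ORM-free sketch of the same control flow; the store dict and ids are illustrative.

import logging

logger = logging.getLogger(__name__)
store = {1: 'shipment #1'}

def delete_item(item_id):
    try:
        try:
            item = store[item_id]
        except KeyError:
            logger.error('no item with id %s', item_id)
        else:
            # Runs only when the lookup raised nothing
            store.pop(item_id)
            print('deleted', item)
    except Exception:
        logger.exception('Unexpected error in delete_item')

delete_item(1)   # deleted
delete_item(42)  # logged, no exception escapes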
inquest/omnibus
omnibus-cli.py
Console.do_hibp
python
def do_hibp(self, arg):
    result = self.dispatch.submit(self.session, 'hibp', arg)
    pp_json(result)
Check HaveIBeenPwned for email address
https://github.com/inquest/omnibus/blob/88dbf5d02f87eaa79a1cfc13d403cf854ee44c40/omnibus-cli.py#L468-L471
import os import sys import cmd2 import json import argparse from lib import common from lib import storage from lib import asciiart from lib.mongo import Mongo from lib.cache import RedisCache from lib.dispatch import Dispatch from lib.common import info from lib.common import mkdir from lib.common import error from lib.common import running from lib.common import success from lib.common import warning from lib.common import bold_msg from lib.common import pp_json from lib.common import lookup_key from lib.common import detect_type from lib.common import read_file from lib.common import get_option from lib.models import create_artifact help_dict = { 'general': [ 'help', 'history', 'quit', 'cat', 'apikey', 'banner', 'set', 'clear', 'artifacts', 'general', 'redirect', 'sessions', 'modules' ], 'artifacts': [ 'new', 'cat', 'open', 'source', 'artifacts', 'delete' ], 'modules': [ 'blockchain', 'clearbit', 'censys', 'csirtg', 'csirtg', 'cymon', 'dnsresolve', 'geoip', 'fullcontact', 'hackedemails', 'he', 'hibp', 'ipinfo', 'ipvoid', 'isc', 'keybase', 'machine', 'nmap', 'passivetotal', 'pgp', 'rss', 'shodan', 'threatcrowd', 'twitter', 'urlvoid', 'virustotal', 'web', 'whois'], 'sessions': [ 'session', 'ls', 'rm', 'wipe' ] } class Console(cmd2.Cmd): def __init__(self): cmd2.Cmd.__init__(self, completekey='tab', persistent_history_file=get_option('core', 'hist_file', config), persistent_history_length=int(get_option('core', 'hist_size', config))) self.allow_cli_args = False self.default_to_shell = False self.intro = 'Welcome to the Omnibus shell! Type "session" to get started or "help" to view all commands.' self.allow_redirection = True self.prompt = 'omnibus >> ' self.redirector = '>' self.quit_on_sigint = False del cmd2.Cmd.do_alias del cmd2.Cmd.do_edit del cmd2.Cmd.do_eof del cmd2.Cmd.do_shell del cmd2.Cmd.do_eos del cmd2.Cmd.do_load del cmd2.Cmd.do_py del cmd2.Cmd.do_pyscript del cmd2.Cmd.do_shortcuts del cmd2.Cmd.do_unalias del cmd2.Cmd.do__relative_load self.db = Mongo(config) self.dispatch = Dispatch(self.db) self.session = None if DEBUG: self.do_set('debug true') def sigint_handler(self, signum, frame): pipe_proc = self.pipe_proc if pipe_proc is not None: pipe_proc.terminate() if self.session is not None: self.session.flush() raise KeyboardInterrupt('Caught keyboard interrupt; quitting ...') def default(self, arg): if arg.startswith('#'): return error('Unknown command') return def do_quit(self, _): self._should_quit = True if self.session is not None: running('Clearing artifact cache ...') self.session.flush() warning('Closing Omnibus shell ...') return self._STOP_AND_EXIT def do_clear(self, arg): os.system('clear') def do_modules(self, arg): bold_msg('[ Modules ]') for cmd in help_dict['modules']: print(cmd) def do_artifacts(self, arg): bold_msg('[ Artifacts ]') for cmd in help_dict['artifacts']: print(cmd) def do_general(self, arg): bold_msg('[ General Commands ]') for cmd in help_dict['general']: print(cmd) def do_sessions(self, arg): bold_msg('[ Session Commands ]') for cmd in help_dict['sessions']: print(cmd) def do_redirect(self, arg): info('Omnibus supports command redirection to output files using the ">" character. For example, "cat host zeroharbor.org > zh.json" will pipe the output of the cat command to ./zh.json on disk.') def do_banner(self, arg): print(asciiart.show_banner()) def do_session(self, arg): self.session = RedisCache(config) if self.session.db is None: error('Failed to connect to Redis back-end. 
Please ensure the Redis service is running') else: success('Opened new session') def do_ls(self, arg): if self.session is None: warning('No active session') return count = 0 keys = self.session.db.scan_iter() for key in keys: value = self.session.get(key) print('[%s] %s' % (key, value)) count += 1 info('Active Artifacts: %d' % count) def do_wipe(self, arg): if self.session is not None: info('Clearing active artifacts from cache ...') self.session.flush() success('Artifact cache cleared') else: warning('No active session; start a new session by running the "session" command') def do_rm(self, arg): try: arg = int(arg) except: error('Artifact ID must be an integer') return if self.session is not None: if self.session.exists(arg): self.session.delete(arg) success('Removed artifact from cache (%s)' % arg) else: warning('Unable to find artifact by ID (%s)' % arg) else: warning('No active session; start a new session by running the "session" command') def do_new(self, arg): artifact = create_artifact(arg) if not self.db.exists(artifact.type, {'name': artifact.name}): doc_id = self.db.insert_one(artifact.type, artifact) if doc_id is not None: success('Created new artifact (%s - %s)' % (artifact.name, artifact.type)) if self.session is None: self.session = RedisCache(config) self.session.set(1, artifact.name) success('Opened new session') print('Artifact ID: 1') else: count = 0 for key in self.session.db.scan_iter(): count += 1 _id = count + 1 self.session.set(_id, artifact.name) print('Artifact ID: %s' % _id) def do_delete(self, arg): is_key, value = lookup_key(self.session, arg) if is_key and value is None: error('Unable to find artifact key in session (%s)' % arg) return elif is_key and value is not None: arg = value else: pass artifact_type = detect_type(arg) self.db.delete_one(artifact_type, {'name': arg}) def do_cat(self, arg): if arg == 'apikeys': data = json.load(open(common.API_CONF, 'rb')) print json.dumps(data, indent=2) else: is_key, value = lookup_key(self.session, arg) if is_key and value is None: error('Unable to find artifact key in session (%s)' % arg) return elif is_key and value is not None: arg = value else: pass artifact_type = detect_type(arg) result = self.db.find(artifact_type, {'name': arg}, one=True) if len(result) == 0: info('No entry found for artifact (%s)' % arg) else: print json.dumps(result, indent=2, separators=(',', ':')) def do_open(self, arg): if not os.path.exists(arg): warning('Cannot find file on disk (%s)' % arg) return artifacts = read_file(arg, True) for artifact in artifacts: new_artifact = create_artifact(artifact) if not self.db.exists(new_artifact.type, {'name': new_artifact.name}): doc_id = self.db.insert_one(new_artifact.type, new_artifact) if doc_id is not None: success('Created new artifact (%s - %s)' % (artifact.name, artifact.type)) if self.session is None: self.session = RedisCache(config) self.session.set(1, arg) success('Opened new session') print('Artifact ID: 1') else: count = 0 for key in self.session.db.scan_iter(): count += 1 _id = count + 1 self.session.set(_id, arg) print('Artifact ID: %s' % _id) success('Finished loading artifact list') def do_report(self, arg): is_key, value = lookup_key(self.session, arg) if is_key and value is None: error('Unable to find artifact key in session (%s)' % arg) return elif is_key and value is not None: arg = value else: pass _type = detect_type(arg) result = self.db.find(_type, {'name': arg}, one=True) if len(result) == 0: warning('No entry found for artifact (%s)' % arg) else: report = 
storage.JSON(data=result, file_path=output_dir) report.save() if os.path.exists(report.file_path): success('Saved artifact report (%s)' % report.file_path) else: error('Failed to properly save report') def do_machine(self, arg): result = self.dispatch.machine(self.session, arg) pp_json(result) def do_blockchain(self, arg): result = self.dispatch.submit(self.session, 'blockchain', arg) pp_json(result) def do_clearbit(self, arg): result = self.dispatch.submit(self.session, 'clearbit', arg) pp_json(result) def do_censys(self, arg): result = self.dispatch.submit(self.session, 'censys', arg) pp_json(result) def do_csirtg(self, arg): result = self.dispatch.submit(self.session, 'csirtg', arg) pp_json(result) def do_cybercure(self, arg): result = self.dispatch.submit(self.session, 'cybercure', arg) pp_json(result) def do_cymon(self, arg): result = self.dispatch.submit(self.session, 'cymon', arg) pp_json(result) def do_dnsresolve(self, arg): result = self.dispatch.submit(self.session, 'dnsresolve', arg) pp_json(result) def do_geoip(self, arg): result = self.dispatch.submit(self.session, 'geoip', arg) pp_json(result) def do_fullcontact(self, arg): result = self.dispatch.submit(self.session, 'fullcontact', arg) pp_json(result) def do_github(self, arg): result = self.dispatch.submit(self.session, 'github', arg) pp_json(result) def do_hackedemails(self, arg): result = self.dispatch.submit(self.session, 'hackedemails', arg) pp_json(result) def do_he(self, arg): result = self.dispatch.submit(self.session, 'he', arg) pp_json(result)
MIT License
kozea/weasyprint
weasyprint/text/fonts.py
FontConfiguration.__del__
python
def __del__(self):
    for filename in self._filenames:
        try:
            os.remove(filename)
        except OSError:
            continue
Clean a font configuration for a document.
https://github.com/kozea/weasyprint/blob/a149af9aaf902901d5d19134f5393e2637bcd219/weasyprint/text/fonts.py#L282-L292
import io import os import pathlib import sys import tempfile import warnings from fontTools.ttLib import TTFont, woff2 from ..logger import LOGGER from ..urls import FILESYSTEM_ENCODING, fetch from .constants import ( CAPS_KEYS, EAST_ASIAN_KEYS, FONTCONFIG_STRETCH, FONTCONFIG_STYLE, FONTCONFIG_WEIGHT, LIGATURE_KEYS, NUMERIC_KEYS) from .ffi import ffi, fontconfig, gobject, pangoft2 def _check_font_configuration(font_config): fonts = fontconfig.FcConfigGetFonts(font_config, fontconfig.FcSetSystem) if fonts.nfont > 0: return config_files = fontconfig.FcConfigGetConfigFiles(font_config) config_file = fontconfig.FcStrListNext(config_files) if config_file == ffi.NULL: warnings.warn( 'FontConfig cannot load default config file. ' 'Expect ugly output.') return else: warnings.warn( 'FontConfig: No fonts configured. Expect ugly output.') return _check_font_configuration(ffi.gc( fontconfig.FcInitLoadConfigAndFonts(), fontconfig.FcConfigDestroy)) class FontConfiguration: def __init__(self): self._fontconfig_config = ffi.gc( fontconfig.FcInitLoadConfigAndFonts(), fontconfig.FcConfigDestroy) self.font_map = ffi.gc( pangoft2.pango_ft2_font_map_new(), gobject.g_object_unref) pangoft2.pango_fc_font_map_set_config( ffi.cast('PangoFcFontMap *', self.font_map), self._fontconfig_config) fontconfig.FcConfigDestroy(self._fontconfig_config) self._tempdir = None if sys.platform.startswith('win'): self._tempdir = os.path.join( tempfile.gettempdir(), 'weasyprint') try: os.mkdir(self._tempdir) except FileExistsError: pass except Exception: self._tempdir = None self._filenames = [] def add_font_face(self, rule_descriptors, url_fetcher): if self.font_map is None: return for font_type, url in rule_descriptors['src']: if url is None: continue if font_type in ('external', 'local'): config = self._fontconfig_config if font_type == 'local': font_name = url.encode('utf-8') pattern = ffi.gc( fontconfig.FcPatternCreate(), fontconfig.FcPatternDestroy) fontconfig.FcConfigSubstitute( config, pattern, fontconfig.FcMatchFont) fontconfig.FcDefaultSubstitute(pattern) fontconfig.FcPatternAddString( pattern, b'fullname', font_name) fontconfig.FcPatternAddString( pattern, b'postscriptname', font_name) family = ffi.new('FcChar8 **') postscript = ffi.new('FcChar8 **') result = ffi.new('FcResult *') matching_pattern = fontconfig.FcFontMatch( config, pattern, result) if matching_pattern == ffi.NULL: LOGGER.debug( 'Failed to get matching local font for %r', font_name.decode('utf-8')) continue fontconfig.FcPatternGetString( matching_pattern, b'fullname', 0, family) fontconfig.FcPatternGetString( matching_pattern, b'postscriptname', 0, postscript) family = ffi.string(family[0]) postscript = ffi.string(postscript[0]) if font_name.lower() in ( family.lower(), postscript.lower()): filename = ffi.new('FcChar8 **') fontconfig.FcPatternGetString( matching_pattern, b'file', 0, filename) path = ffi.string(filename[0]).decode( FILESYSTEM_ENCODING) url = pathlib.Path(path).as_uri() else: LOGGER.debug( 'Failed to load local font "%s"', font_name.decode('utf-8')) continue try: with fetch(url_fetcher, url) as result: if 'string' in result: font = result['string'] else: font = result['file_obj'].read() if font[:3] == b'wOF': out = io.BytesIO() if font[3:4] == b'F': ttfont = TTFont(io.BytesIO(font)) ttfont.flavor = ttfont.flavorData = None ttfont.save(out) elif font[3:4] == b'2': woff2.decompress(io.BytesIO(font), out) font = out.getvalue() except Exception as exc: LOGGER.debug( 'Failed to load font at %r (%s)', url, exc) continue features = { 
rules[0][0].replace('-', '_'): rules[0][1] for rules in rule_descriptors.get('font_variant', [])} if 'font_feature_settings' in rule_descriptors: features['font_feature_settings'] = ( rule_descriptors['font_feature_settings']) features_string = '' for key, value in font_features(**features).items(): features_string += f'<string>{key} {value}</string>' fd = tempfile.NamedTemporaryFile( 'wb', dir=self._tempdir, delete=False) font_filename = fd.name fd.write(font) fd.close() self._filenames.append(font_filename) fontconfig_style = FONTCONFIG_STYLE[ rule_descriptors.get('font_style', 'normal')] fontconfig_weight = FONTCONFIG_WEIGHT[ rule_descriptors.get('font_weight', 'normal')] fontconfig_stretch = FONTCONFIG_STRETCH[ rule_descriptors.get('font_stretch', 'normal')] xml = f'''<?xml version="1.0"?> <!DOCTYPE fontconfig SYSTEM "fonts.dtd"> <fontconfig> <match target="scan"> <test name="file" compare="eq"> <string>{font_filename}</string> </test> <edit name="family" mode="assign_replace"> <string>{rule_descriptors['font_family']}</string> </edit> <edit name="slant" mode="assign_replace"> <const>{fontconfig_style}</const> </edit> <edit name="weight" mode="assign_replace"> <const>{fontconfig_weight}</const> </edit> <edit name="width" mode="assign_replace"> <const>{fontconfig_stretch}</const> </edit> </match> <match target="font"> <test name="file" compare="eq"> <string>{font_filename}</string> </test> <edit name="fontfeatures" mode="assign_replace">{features_string}</edit> </match> </fontconfig>''' fd = tempfile.NamedTemporaryFile( 'w', dir=self._tempdir, delete=False) fd.write(xml) fd.close() self._filenames.append(fd.name) fontconfig.FcConfigParseAndLoad( config, fd.name.encode(FILESYSTEM_ENCODING), True) font_added = fontconfig.FcConfigAppFontAddFile( config, font_filename.encode(FILESYSTEM_ENCODING)) if font_added: pangoft2.pango_fc_font_map_config_changed( ffi.cast('PangoFcFontMap *', self.font_map)) return font_filename else: LOGGER.debug('Failed to load font at %r', url) LOGGER.warning( 'Font-face %r cannot be loaded', rule_descriptors['font_family'])
BSD 3-Clause New or Revised License
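A minimal standalone version of the cleanup idiom used in __del__ above: remember every temporary file you create and best-effort delete it when the owner goes away, swallowing OSError for files that are already gone. The class name and file contents are illustrative.

import os
import tempfile

class TempFileOwner:
    def __init__(self):
        self._filenames = []

    def make_temp(self, data=b''):
        fd = tempfile.NamedTemporaryFile('wb', delete=False)
        fd.write(data)
        fd.close()
        self._filenames.append(fd.name)
        return fd.name

    def __del__(self):
        for filename in self._filenames:
            try:
                os.remove(filename)
            except OSError:
                continue

owner = TempFileOwner()
path = owner.make_temp(b'font bytes')
del owner                      # best-effort cleanup runs here in CPython
print(os.path.exists(path))    # normally False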
scut-ailab/dcp
dcp/channel_selection/channel_selection.py
LayerChannelSelection.prepare_channel_selection
python
def prepare_channel_selection(self, original_segment, pruned_segment, module, aux_fc, layer_name, block_count):
    self.split_segment_into_three_parts(original_segment, pruned_segment, block_count)
    pruned_segment, layer = self.replace_layer_with_mask_conv(pruned_segment, module, layer_name, block_count)
    self.register_layer_hook(original_segment, pruned_segment, module, layer_name, block_count)
    self.segment_parallelism(original_segment, pruned_segment)
    for params in self.original_segment_parallel.parameters():
        params.requires_grad = False
    for params in self.pruned_segment_parallel.parameters():
        params.requires_grad = False
    self.original_segment_parallel.eval()
    self.pruned_segment_parallel.eval()
    aux_fc.eval()
    self.num_batch = len(self.train_loader)
    layer.pruned_weight.requires_grad = True
    aux_fc.cuda()
    self.logger_counter = 0
    return pruned_segment, layer
Prepare for channel selection:
1. Split the segment into three parts.
2. Replace the pruned layer with mask convolution.
3. Store the input feature map of the pruned layer in advance to accelerate channel selection.
https://github.com/scut-ailab/dcp/blob/70a2e53ae896573b0b4323eac5817e5660315cb4/dcp/channel_selection/channel_selection.py#L179-L213
import datetime import math import os import time import torch import torch.nn as nn import dcp.utils as utils from dcp.mask_conv import MaskConv2d from dcp.utils.others import concat_gpu_data from dcp.utils.write_log import write_log class LayerChannelSelection(object): def __init__(self, trainer, train_loader, val_loader, settings, checkpoint, logger, tensorboard_logger): self.segment_wise_trainer = trainer self.train_loader = train_loader self.val_loader = val_loader self.settings = settings self.checkpoint = checkpoint self.logger = logger self.tensorboard_logger = tensorboard_logger self.feature_cache_original_input = {} self.feature_cache_original_output = {} self.feature_cache_pruned_input = {} self.feature_cache_pruned_output = {} self.criterion_mse = nn.MSELoss().cuda() self.criterion_softmax = nn.CrossEntropyLoss().cuda() self.logger_counter = 0 self.record_time = utils.AverageMeter() self.record_selection_mse_loss = utils.AverageMeter() self.record_selection_softmax_loss = utils.AverageMeter() self.record_selection_loss = utils.AverageMeter() self.record_sub_problem_softmax_loss = utils.AverageMeter() self.record_sub_problem_mse_loss = utils.AverageMeter() self.record_sub_problem_loss = utils.AverageMeter() self.record_sub_problem_top1_error = utils.AverageMeter() self.record_sub_problem_top5_error = utils.AverageMeter() def split_segment_into_three_parts(self, original_segment, pruned_segment, block_count): original_segment_list = utils.model2list(original_segment) pruned_segment_list = utils.model2list(pruned_segment) original_segment_before_pruned_module = [] pruned_segment_before_pruned_module = [] pruned_segment_after_pruned_module = [] for i in range(len(pruned_segment)): if i < block_count: original_segment_before_pruned_module.append(original_segment_list[i]) pruned_segment_before_pruned_module.append(pruned_segment_list[i]) if i > block_count: pruned_segment_after_pruned_module.append(pruned_segment_list[i]) self.original_segment_before_pruned_module = nn.Sequential(*original_segment_before_pruned_module) self.pruned_segment_before_pruned_module = nn.Sequential(*pruned_segment_before_pruned_module) self.pruned_segment_after_pruned_module = nn.Sequential(*pruned_segment_after_pruned_module) @staticmethod def _replace_layer(net, layer, layer_index): assert isinstance(net, nn.Sequential), "only support nn.Sequential" new_net = None count = 0 for origin_layer in net: if count == layer_index: if new_net is None: new_net = nn.Sequential(layer) else: new_net.add_module(str(len(new_net)), layer) else: if new_net is None: new_net = nn.Sequential(origin_layer) else: new_net.add_module(str(len(new_net)), origin_layer) count += 1 return new_net def replace_layer_with_mask_conv(self, pruned_segment, module, layer_name, block_count): if layer_name == "conv2": layer = module.conv2 elif layer_name == "conv3": layer = module.conv3 elif layer_name == "conv": assert self.settings.net_type in ["vgg"], "only support vgg" layer = module else: assert False, "unsupport layer: {}".format(layer_name) if not isinstance(layer, MaskConv2d): temp_conv = MaskConv2d( in_channels=layer.in_channels, out_channels=layer.out_channels, kernel_size=layer.kernel_size, stride=layer.stride, padding=layer.padding, bias=(layer.bias is not None)) temp_conv.weight.data.copy_(layer.weight.data) if layer.bias is not None: temp_conv.bias.data.copy_(layer.bias.data) temp_conv.pruned_weight.data.fill_(0) temp_conv.d.fill_(0) if layer_name == "conv2": module.conv2 = temp_conv elif layer_name == "conv3": module.conv3 = 
temp_conv elif layer_name == "conv": pruned_segment = self._replace_layer(net=pruned_segment, layer=temp_conv, layer_index=block_count) layer = temp_conv return pruned_segment, layer def _hook_origin_feature(self, module, input, output): gpu_id = str(output.get_device()) self.feature_cache_original_input[gpu_id] = input[0] self.feature_cache_original_output[gpu_id] = output def _hook_pruned_feature(self, module, input, output): gpu_id = str(output.get_device()) self.feature_cache_pruned_input[gpu_id] = input[0] self.feature_cache_pruned_output[gpu_id] = output def register_layer_hook(self, original_segment, pruned_segment, module, layer_name, block_count): if layer_name == "conv2": self.hook_origin = original_segment[block_count].conv2.register_forward_hook(self._hook_origin_feature) self.hook_pruned = module.conv2.register_forward_hook(self._hook_pruned_feature) elif layer_name == "conv3": self.hook_origin = original_segment[block_count].conv3.register_forward_hook(self._hook_origin_feature) self.hook_pruned = module.conv3.register_forward_hook(self._hook_pruned_feature) elif layer_name == "conv": self.hook_origin = original_segment[block_count].register_forward_hook(self._hook_origin_feature) self.hook_pruned = pruned_segment[block_count].register_forward_hook(self._hook_pruned_feature) def segment_parallelism(self, original_segment, pruned_segment): self.original_segment_parallel = utils.data_parallel(original_segment, self.settings.n_gpus) self.pruned_segment_parallel = utils.data_parallel(pruned_segment, self.settings.n_gpus) def reset_average_meter(self): self.record_time.reset() self.record_selection_mse_loss.reset() self.record_selection_softmax_loss.reset() self.record_selection_loss.reset() self.record_sub_problem_softmax_loss.reset() self.record_sub_problem_mse_loss.reset() self.record_sub_problem_loss.reset() self.record_sub_problem_top1_error.reset() self.record_sub_problem_top5_error.reset()
BSD 3-Clause New or Revised License
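The preparation step above mostly freezes the surrounding network and switches it to evaluation mode so that only the pruned layer's weights remain trainable. A short PyTorch sketch of that freeze/eval pattern, assuming torch is installed; the toy model is illustrative and unrelated to dcp's segments.

import torch.nn as nn

model = nn.Sequential(nn.Conv2d(3, 8, 3), nn.ReLU(), nn.Conv2d(8, 8, 3))
selected = model[2]  # pretend this is the layer undergoing channel selection

# Freeze everything and put the model in eval mode ...
for p in model.parameters():
    p.requires_grad = False
model.eval()

# ... then re-enable gradients only for the selected layer
for p in selected.parameters():
    p.requires_grad = True

print([p.requires_grad for p in model[0].parameters()])   # [False, False]
print([p.requires_grad for p in selected.parameters()])   # [True, True]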
rojopolis/terraform-aws-lambda-python-archive
scripts/build_lambda.py
get_hash
python
def get_hash(output_path):
    with open(output_path, 'rb') as f:
        h = hashlib.sha256()
        h.update(f.read())
        return base64.standard_b64encode(h.digest()).decode('utf-8', 'strict')
Return base64 encoded sha256 hash of archive file
https://github.com/rojopolis/terraform-aws-lambda-python-archive/blob/0b0dc9cf0870c4280495633aa0cd92fe197bbde2/scripts/build_lambda.py#L61-L68
from distutils.dir_util import copy_tree import base64 import errno import hashlib import json import logging import os import shutil import subprocess import sys import tempfile import zipfile def build(src_dir, output_path, install_dependencies): with tempfile.TemporaryDirectory() as build_dir: copy_tree(src_dir, build_dir) if os.path.exists(os.path.join(src_dir, 'requirements.txt')): subprocess.run( [sys.executable, '-m', 'pip', 'install', '--ignore-installed', '--target', build_dir, '-r', os.path.join(build_dir, 'requirements.txt'), *(['--no-deps'] if install_dependencies == 'false' else [])], check=True, stdout=subprocess.DEVNULL, ) make_archive(build_dir, output_path) return output_path def make_archive(src_dir, output_path): try: os.makedirs(os.path.dirname(output_path)) except OSError as e: if e.errno == errno.EEXIST: pass else: raise with zipfile.ZipFile(output_path, 'w') as archive: for root, dirs, files in os.walk(src_dir): for file in files: if file.endswith('.pyc'): break metadata = zipfile.ZipInfo( os.path.join(root, file).replace(src_dir, '').lstrip(os.sep) ) metadata.external_attr = 0o755 << 16 with open(os.path.join(root, file), 'rb') as f: data = f.read() archive.writestr( metadata, data )
MIT License
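A standalone equivalent of get_hash() above using only hashlib and base64; the file is created on the fly with illustrative contents so the snippet runs by itself.

import base64
import hashlib
import tempfile

with tempfile.NamedTemporaryFile('wb', delete=False) as f:
    f.write(b'lambda archive bytes')
    path = f.name

with open(path, 'rb') as f:
    digest = hashlib.sha256(f.read()).digest()

# Base64-encoded SHA-256, as Terraform expects for source_code_hash
print(base64.standard_b64encode(digest).decode('utf-8'))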
chenwuperth/rgz_rcnn
lib/fast_rcnn/test.py
_rescale_boxes
python
def _rescale_boxes(boxes, inds, scales):
    for i in xrange(boxes.shape[0]):
        boxes[i,:] = boxes[i,:] / scales[int(inds[i])]
    return boxes
Rescale boxes according to image rescaling.
https://github.com/chenwuperth/rgz_rcnn/blob/b526c237fea5c9a77bbe7bd0048f72cf93e733a4/lib/fast_rcnn/test.py#L131-L137
from fast_rcnn.config import cfg, get_output_dir import argparse from utils.timer import Timer import numpy as np import cv2 from utils.cython_nms import nms, nms_new from utils.boxes_grid import get_boxes_grid from utils.project_bbox import project_bbox_inv import cPickle import heapq from utils.blob import im_list_to_blob import os import math from rpn_msr.generate import imdb_proposals_det import tensorflow as tf from fast_rcnn.bbox_transform import clip_boxes, bbox_transform_inv, bbox_contains try: import matplotlib.pyplot as plt except: print('Cannot run vis during test due to the unavailability of matplotlib') from tensorflow.python.client import timeline import time from collections import defaultdict def _get_image_blob(im): im_orig = im.astype(np.float32, copy=True) im_orig -= cfg.PIXEL_MEANS im_shape = im_orig.shape im_size_min = np.min(im_shape[0:2]) im_size_max = np.max(im_shape[0:2]) processed_ims = [] im_scale_factors = [] for target_size in cfg.TEST.SCALES: im_scale = float(target_size) / float(im_size_min) if np.round(im_scale * im_size_max) > cfg.TEST.MAX_SIZE: im_scale = float(cfg.TEST.MAX_SIZE) / float(im_size_max) im = cv2.resize(im_orig, None, None, fx=im_scale, fy=im_scale, interpolation=cv2.INTER_LINEAR) im_scale_factors.append(im_scale) processed_ims.append(im) blob = im_list_to_blob(processed_ims) return blob, np.array(im_scale_factors) def _get_rois_blob(im_rois, im_scale_factors): rois, levels = _project_im_rois(im_rois, im_scale_factors) rois_blob = np.hstack((levels, rois)) return rois_blob.astype(np.float32, copy=False) def _project_im_rois(im_rois, scales): im_rois = im_rois.astype(np.float, copy=False) scales = np.array(scales) if len(scales) > 1: widths = im_rois[:, 2] - im_rois[:, 0] + 1 heights = im_rois[:, 3] - im_rois[:, 1] + 1 areas = widths * heights scaled_areas = areas[:, np.newaxis] * (scales[np.newaxis, :] ** 2) diff_areas = np.abs(scaled_areas - 224 * 224) levels = diff_areas.argmin(axis=1)[:, np.newaxis] else: levels = np.zeros((im_rois.shape[0], 1), dtype=np.int) rois = im_rois * scales[levels] return rois, levels def _get_blobs(im, rois): if cfg.TEST.HAS_RPN: blobs = {'data' : None, 'rois' : None} blobs['data'], im_scale_factors = _get_image_blob(im) else: blobs = {'data' : None, 'rois' : None} blobs['data'], im_scale_factors = _get_image_blob(im) if cfg.IS_MULTISCALE: if cfg.IS_EXTRAPOLATING: blobs['rois'] = _get_rois_blob(rois, cfg.TEST.SCALES) else: blobs['rois'] = _get_rois_blob(rois, cfg.TEST.SCALES_BASE) else: blobs['rois'] = _get_rois_blob(rois, cfg.TEST.SCALES_BASE) return blobs, im_scale_factors def _clip_boxes(boxes, im_shape): boxes[:, 0::4] = np.maximum(boxes[:, 0::4], 0) boxes[:, 1::4] = np.maximum(boxes[:, 1::4], 0) boxes[:, 2::4] = np.minimum(boxes[:, 2::4], im_shape[1] - 1) boxes[:, 3::4] = np.minimum(boxes[:, 3::4], im_shape[0] - 1) return boxes
MIT License
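A small NumPy illustration of the rescaling above: each box row is divided by the scale factor of the image it came from. The boxes, inds and scales values are made up for the example.

import numpy as np

boxes = np.array([[10., 20., 30., 40.],
                  [ 8., 16., 24., 32.]])
inds = np.array([0, 1])        # which image scale each box used
scales = np.array([2.0, 4.0])  # per-image rescaling factors

for i in range(boxes.shape[0]):
    boxes[i, :] = boxes[i, :] / scales[int(inds[i])]

print(boxes)  # first row halved, second row quartered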
brython-dev/brython
www/src/Lib/test/test_gdb.py
PrettyPrintTests.assertSane
python
def assertSane(self, source, corruption, exprepr=None):
    if corruption:
        cmds_after_breakpoint=[corruption, 'backtrace']
    else:
        cmds_after_breakpoint=['backtrace']
    gdb_repr, gdb_output = self.get_gdb_repr(source,
                                             cmds_after_breakpoint=cmds_after_breakpoint)
    if exprepr:
        if gdb_repr == exprepr:
            return
    pattern = '<.* at remote 0x-?[0-9a-f]+>'
    m = re.match(pattern, gdb_repr)
    if not m:
        self.fail('Unexpected gdb representation: %r\n%s' % (gdb_repr, gdb_output))
Run Python under gdb, corrupting variables in the inferior process immediately before taking a backtrace. Verify that the variable's representation is the expected failsafe representation.
https://github.com/brython-dev/brython/blob/33aeaab551f1b73209326c5a0aecf98642d4c126/www/src/Lib/test/test_gdb.py#L502-L529
import os import platform import re import subprocess import sys import sysconfig import textwrap import unittest from test import support from test.support import findfile, python_is_optimized def get_gdb_version(): try: cmd = ["gdb", "-nx", "--version"] proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) with proc: version, stderr = proc.communicate() if proc.returncode: raise Exception(f"Command {' '.join(cmd)!r} failed " f"with exit code {proc.returncode}: " f"stdout={version!r} stderr={stderr!r}") except OSError: raise unittest.SkipTest("Couldn't find gdb on the path") match = re.search(r"^(?:GNU|HP) gdb.*?\b(\d+)\.(\d+)", version) if match is None: raise Exception("unable to parse GDB version: %r" % version) return (version, int(match.group(1)), int(match.group(2))) gdb_version, gdb_major_version, gdb_minor_version = get_gdb_version() if gdb_major_version < 7: raise unittest.SkipTest("gdb versions before 7.0 didn't support python " "embedding. Saw %s.%s:\n%s" % (gdb_major_version, gdb_minor_version, gdb_version)) if (gdb_major_version, gdb_minor_version) >= (9, 2): raise unittest.SkipTest("https://bugzilla.redhat.com/show_bug.cgi?id=1866884") if not sysconfig.is_python_build(): raise unittest.SkipTest("test_gdb only works on source builds at the moment.") if 'Clang' in platform.python_compiler() and sys.platform == 'darwin': raise unittest.SkipTest("test_gdb doesn't work correctly when python is" " built with LLVM clang") if ((sysconfig.get_config_var('PGO_PROF_USE_FLAG') or 'xxx') in (sysconfig.get_config_var('PY_CORE_CFLAGS') or '')): raise unittest.SkipTest("test_gdb is not reliable on PGO builds") checkout_hook_path = os.path.join(os.path.dirname(sys.executable), 'python-gdb.py') PYTHONHASHSEED = '123' def cet_protection(): cflags = sysconfig.get_config_var('CFLAGS') if not cflags: return False flags = cflags.split() return (('-mcet' in flags) and any((flag.startswith('-fcf-protection') and not flag.endswith(("=none", "=return"))) for flag in flags)) CET_PROTECTION = cet_protection() def run_gdb(*args, **env_vars): if env_vars: env = os.environ.copy() env.update(env_vars) else: env = None base_cmd = ('gdb', '--batch', '-nx') if (gdb_major_version, gdb_minor_version) >= (7, 4): base_cmd += ('-iex', 'add-auto-load-safe-path ' + checkout_hook_path) proc = subprocess.Popen(base_cmd + args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env) with proc: out, err = proc.communicate() return out.decode('utf-8', 'replace'), err.decode('utf-8', 'replace') gdbpy_version, _ = run_gdb("--eval-command=python import sys; print(sys.version_info)") if not gdbpy_version: raise unittest.SkipTest("gdb not built with embedded python support") _, gdbpy_errors = run_gdb('--args', sys.executable) if "auto-loading has been declined" in gdbpy_errors: msg = "gdb security settings prevent use of custom hooks: " raise unittest.SkipTest(msg + gdbpy_errors.rstrip()) def gdb_has_frame_select(): stdout, _ = run_gdb("--eval-command=python print(dir(gdb.Frame))") m = re.match(r'.*\[(.*)\].*', stdout) if not m: raise unittest.SkipTest("Unable to parse output from gdb.Frame.select test") gdb_frame_dir = m.group(1).split(', ') return "'select'" in gdb_frame_dir HAS_PYUP_PYDOWN = gdb_has_frame_select() BREAKPOINT_FN='builtin_id' @unittest.skipIf(support.PGO, "not useful for PGO") class DebuggerTests(unittest.TestCase): def get_stack_trace(self, source=None, script=None, breakpoint=BREAKPOINT_FN, cmds_after_breakpoint=None, 
import_site=False): commands = ['set breakpoint pending yes', 'break %s' % breakpoint, 'set print address off', 'run'] if (gdb_major_version, gdb_minor_version) >= (7, 4): commands += ['set print entry-values no'] if cmds_after_breakpoint: if CET_PROTECTION: commands += ['next'] commands += cmds_after_breakpoint else: commands += ['backtrace'] args = ['--eval-command=%s' % cmd for cmd in commands] args += ["--args", sys.executable] args.extend(subprocess._args_from_interpreter_flags()) if not import_site: args += ["-S"] if source: args += ["-c", source] elif script: args += [script] out, err = run_gdb(*args, PYTHONHASHSEED=PYTHONHASHSEED) for line in err.splitlines(): print(line, file=sys.stderr) if "PC not saved" in err: raise unittest.SkipTest("gdb cannot walk the frame object" " because the Program Counter is" " not present") for pattern in ( '(frame information optimized out)', 'Unable to read information on python frame', ): if pattern in out: raise unittest.SkipTest(f"{pattern!r} found in gdb output") return out def get_gdb_repr(self, source, cmds_after_breakpoint=None, import_site=False): cmds_after_breakpoint = cmds_after_breakpoint or ["backtrace 1"] gdb_output = self.get_stack_trace(source, breakpoint=BREAKPOINT_FN, cmds_after_breakpoint=cmds_after_breakpoint, import_site=import_site) m = re.search( r'#0\s+builtin_id\s+\(self\=.*,\s+v=\s*(.*?)?\)' r'\s+at\s+\S*[A-Za-z]+/[A-Za-z0-9_-]+\.c', gdb_output, re.DOTALL) if not m: self.fail('Unexpected gdb output: %r\n%s' % (gdb_output, gdb_output)) return m.group(1), gdb_output def assertEndsWith(self, actual, exp_end): self.assertTrue(actual.endswith(exp_end), msg='%r did not end with %r' % (actual, exp_end)) def assertMultilineMatches(self, actual, pattern): m = re.match(pattern, actual, re.DOTALL) if not m: self.fail(msg='%r did not match %r' % (actual, pattern)) def get_sample_script(self): return findfile('gdb_sample.py') class PrettyPrintTests(DebuggerTests): def test_getting_backtrace(self): gdb_output = self.get_stack_trace('id(42)') self.assertTrue(BREAKPOINT_FN in gdb_output) def assertGdbRepr(self, val, exp_repr=None): gdb_repr, gdb_output = self.get_gdb_repr('id(' + ascii(val) + ')') if not exp_repr: exp_repr = repr(val) self.assertEqual(gdb_repr, exp_repr, ('%r did not equal expected %r; full output was:\n%s' % (gdb_repr, exp_repr, gdb_output))) def test_int(self): self.assertGdbRepr(42) self.assertGdbRepr(0) self.assertGdbRepr(-7) self.assertGdbRepr(1000000000000) self.assertGdbRepr(-1000000000000000) def test_singletons(self): self.assertGdbRepr(True) self.assertGdbRepr(False) self.assertGdbRepr(None) def test_dicts(self): self.assertGdbRepr({}) self.assertGdbRepr({'foo': 'bar'}, "{'foo': 'bar'}") self.assertGdbRepr({'foo': 'bar', 'douglas': 42}, "{'foo': 'bar', 'douglas': 42}") def test_lists(self): self.assertGdbRepr([]) self.assertGdbRepr(list(range(5))) def test_bytes(self): self.assertGdbRepr(b'') self.assertGdbRepr(b'And now for something hopefully the same') self.assertGdbRepr(b'string with embedded NUL here \0 and then some more text') self.assertGdbRepr(b'this is a tab:\t' b' this is a slash-N:\n' b' this is a slash-R:\r' ) self.assertGdbRepr(b'this is byte 255:\xff and byte 128:\x80') self.assertGdbRepr(bytes([b for b in range(255)])) def test_strings(self): out, err = run_gdb( '--eval-command', 'python import locale; print(locale.getpreferredencoding())') encoding = out.rstrip() if err or not encoding: raise RuntimeError( f'unable to determine the preferred encoding ' f'of embedded Python in GDB: {err}') def 
check_repr(text): try: text.encode(encoding) except UnicodeEncodeError: self.assertGdbRepr(text, ascii(text)) else: self.assertGdbRepr(text) self.assertGdbRepr('') self.assertGdbRepr('And now for something hopefully the same') self.assertGdbRepr('string with embedded NUL here \0 and then some more text') check_repr('\u2620') check_repr('\u6587\u5b57\u5316\u3051') check_repr(chr(0x1D121)) def test_tuples(self): self.assertGdbRepr(tuple(), '()') self.assertGdbRepr((1,), '(1,)') self.assertGdbRepr(('foo', 'bar', 'baz')) def test_sets(self): if (gdb_major_version, gdb_minor_version) < (7, 3): self.skipTest("pretty-printing of sets needs gdb 7.3 or later") self.assertGdbRepr(set(), "set()") self.assertGdbRepr(set(['a']), "{'a'}") if not sys.flags.ignore_environment: self.assertGdbRepr(set(['a', 'b']), "{'a', 'b'}") self.assertGdbRepr(set([4, 5, 6]), "{4, 5, 6}") gdb_repr, gdb_output = self.get_gdb_repr('''s = set(['a','b']) s.remove('a') id(s)''') self.assertEqual(gdb_repr, "{'b'}") def test_frozensets(self): if (gdb_major_version, gdb_minor_version) < (7, 3): self.skipTest("pretty-printing of frozensets needs gdb 7.3 or later") self.assertGdbRepr(frozenset(), "frozenset()") self.assertGdbRepr(frozenset(['a']), "frozenset({'a'})") if not sys.flags.ignore_environment: self.assertGdbRepr(frozenset(['a', 'b']), "frozenset({'a', 'b'})") self.assertGdbRepr(frozenset([4, 5, 6]), "frozenset({4, 5, 6})") def test_exceptions(self): gdb_repr, gdb_output = self.get_gdb_repr(''' try: raise RuntimeError("I am an error") except RuntimeError as e: id(e) ''') self.assertEqual(gdb_repr, "RuntimeError('I am an error',)") gdb_repr, gdb_output = self.get_gdb_repr(''' try: a = 1 / 0 except ZeroDivisionError as e: id(e) ''') self.assertEqual(gdb_repr, "ZeroDivisionError('division by zero',)") def test_modern_class(self): gdb_repr, gdb_output = self.get_gdb_repr(''' class Foo: pass foo = Foo() foo.an_int = 42 id(foo)''') m = re.match(r'<Foo\(an_int=42\) at remote 0x-?[0-9a-f]+>', gdb_repr) self.assertTrue(m, msg='Unexpected new-style class rendering %r' % gdb_repr) def test_subclassing_list(self): gdb_repr, gdb_output = self.get_gdb_repr(''' class Foo(list): pass foo = Foo() foo += [1, 2, 3] foo.an_int = 42 id(foo)''') m = re.match(r'<Foo\(an_int=42\) at remote 0x-?[0-9a-f]+>', gdb_repr) self.assertTrue(m, msg='Unexpected new-style class rendering %r' % gdb_repr) def test_subclassing_tuple(self): gdb_repr, gdb_output = self.get_gdb_repr(''' class Foo(tuple): pass foo = Foo((1, 2, 3)) foo.an_int = 42 id(foo)''') m = re.match(r'<Foo\(an_int=42\) at remote 0x-?[0-9a-f]+>', gdb_repr) self.assertTrue(m, msg='Unexpected new-style class rendering %r' % gdb_repr)
BSD 3-Clause New or Revised License
zebrium/zebrium-kubernetes-demo
manage.py
list
python
def list(args):
    experiments = sorted(os.listdir('./litmus'))
    print_color("Available Litmus Chaos Experiments:\n\n")

    i = 1
    for experiment_file in experiments:
        print_color(f"\t{i}. {experiment_file.replace('.yaml', '')}")
        i += 1
List all Litmus Chaos Experiments available in this repository
https://github.com/zebrium/zebrium-kubernetes-demo/blob/fddf3a05fa798d8f49c40ea3f1ed2f24441f27b7/manage.py#L252-L261
import argparse import os import json import sys import time from datetime import datetime import subprocess import yaml class bcolors: HEADER = '\033[95m' OKBLUE = '\033[94m' OKGREEN = '\033[92m' WARNING = '\033[93m' FAIL = '\033[91m' ENDC = '\033[0m' BOLD = '\033[1m' UNDERLINE = '\033[4m' def print_color(text: str, color:bcolors = bcolors.BOLD): print(f"{color}{text}{bcolors.ENDC}") def run_shell(cmd: str): print_color(f"** RUNNING: {cmd}") os.system(cmd) def start(args): print_color(f"Starting GKE cluster in project {args.project} with name {args.name} in zone {args.zone}", bcolors.OKBLUE) run_shell("gcloud components update") run_shell(f"gcloud config set project \"{args.project}\"") run_shell(f"gcloud container clusters create {args.name} --zone {args.zone} --cluster-version 1.14.10-gke.17 --machine-type n1-standard-2 --no-enable-autoupgrade") run_shell(f"gcloud container clusters get-credentials {args.name} --zone {args.zone}") print_color("\nGKE Cluster Running with following nodes:\n") run_shell(f"kubectl get nodes") ze_deployment_name = "zebrium-k8s-demo" ze_logs_url = "https://zapi03.zebrium.com" ze_stats_url = "https://zapi03.zebrium.com/stats/api/v1/zstats" run_shell("sleep 90") run_shell("kubectl create namespace zebrium") run_shell(f"helm install zlog-collector --namespace zebrium --set zebrium.deployment={ze_deployment_name},zebrium.collectorUrl={ze_logs_url},zebrium.authToken={args.key} --repo https://raw.githubusercontent.com/zebrium/ze-kubernetes-collector/master/charts zlog-collector") run_shell(f"helm install zstats-collector --namespace zebrium --set zebrium.deployment={ze_deployment_name},zebrium.collectorUrl={ze_stats_url},zebrium.authToken={args.key} --repo https://raw.githubusercontent.com/zebrium/ze-stats/master/charts zstats") run_shell("kubectl create -f ./deploy/sock-shop.yaml") run_shell("kubectl create -f ./deploy/random-log-counter.yaml") run_shell("kubectl create namespace kafka") run_shell("helm repo add confluentinc https://confluentinc.github.io/cp-helm-charts/") run_shell("helm repo update") run_shell("helm install kafka-cluster --set cp-schema-registry.enabled=false,cp-kafka-rest.enabled=false,cp-kafka-connect.enabled=false,cp-control-center.enabled=false,cp-ksql-server.enabled=false confluentinc/cp-helm-charts --namespace=kafka") run_shell('kubectl annotate sts/kafka-cluster-cp-kafka litmuschaos.io/chaos="true" -n kafka') run_shell("kubectl apply -f https://litmuschaos.github.io/pages/litmus-operator-v1.1.0.yaml") run_shell("curl -sL https://github.com/litmuschaos/chaos-charts/archive/1.1.1.tar.gz -o litmus.tar.gz") run_shell("tar -zxvf litmus.tar.gz") run_shell("rm litmus.tar.gz") run_shell("find chaos-charts-1.1.1 -name experiments.yaml | grep generic | xargs kubectl apply -n sock-shop -f") run_shell("find chaos-charts-1.1.1 -name experiments.yaml | grep kafka | xargs kubectl apply -n kafka -f") run_shell("kubectl create -f ./deploy/litmus-rbac.yaml") run_shell("sleep 60") print_color("\nIngress Details:\n", bcolors.UNDERLINE) run_shell("kubectl get ingress basic-ingress --namespace=sock-shop") try: ingress_ip = json.loads(os.popen('kubectl get ingress basic-ingress --namespace=sock-shop -o json').read())["status"][ "loadBalancer"]["ingress"][0]["ip"] print_color(f"\nYou can access the web application in a few minutes at: http://{ingress_ip}\n\n") except: print_color("Ingress still being setup. 
Use the following command to get the IP later:", bcolors.WARNING) print_color("\tkubectl get ingress basic-ingress --namespace=sock-shop", bcolors.WARNING) print_color("***************************************************************************************************", bcolors.WARNING) print_color(f"* {datetime.now().strftime('%Y-%m-%d %H:%M:%S')} Finished creating cluster.", bcolors.WARNING) print_color("* Please wait at least 15 minutes for environment to become fully initalised.") print_color("* The ingress to access the web application from your browser can take at least 5 minutes to create.", bcolors.WARNING) print_color("*", bcolors.WARNING) print_color("*", bcolors.WARNING) print_color("* IMPORTANT: To reliably detect Chaos experiment incidents you must reduce the Refractory Period for your account to 10 minutes.", bcolors.WARNING) print_color("* You can do this under your Advanced account settings at https://portal03.zebrium.com/Settings/advanced.",bcolors.WARNING) print_color("*", bcolors.WARNING) print_color("***************************************************************************************************\n\n", bcolors.WARNING) def stop(args): print_color(f"Stopping GKE cluster in project {args.project} with name {args.name} in zone {args.zone}", bcolors.OKBLUE) run_shell(f"gcloud config set project \"{args.project}\"") run_shell(f"gcloud container clusters delete {args.name} --zone {args.zone}") class ExperimentResult(object): def __init__(self, name:str, status:str, startTime:datetime): self.name = name self.status = status self.startTime = startTime def run_experiment(experiment: str, delay: int = 0): print_color("***************************************************************************************************", bcolors.OKBLUE) print_color(f"* {datetime.now().strftime('%Y-%m-%d %H:%M:%S')} Experiment: {experiment}", bcolors.OKBLUE) print_color("***************************************************************************************************", bcolors.OKBLUE) experiment_file = experiment + ".yaml" with open(f"./litmus/{experiment_file}") as f: spec = yaml.load(f, Loader=yaml.FullLoader) result_name = spec['metadata']['name'] namespace = spec['metadata']['namespace'] if (delay > 0): spec['spec']['experiments'][0]['spec']['components']['env'].append({'name': 'RAMP_TIME', 'value': str(delay)}) with open(r"temp.yaml", 'w') as temp: yaml.dump(spec, temp) print_color(f"Running Litmus ChaosEngine Experiment {experiment_file} in namespace {namespace} with delay {delay} seconds...") print_color(f"Deploying {experiment_file}...") run_shell(f"kubectl delete chaosengine {result_name} -n {namespace}") run_shell(f"kubectl create -f temp.yaml -n {namespace}") startTime = datetime.now() print_color(f"{startTime.strftime('%Y-%m-%d %H:%M:%S')} Running experiment...") expStatusCmd = "kubectl get chaosengine " + result_name + " -o jsonpath='{.status.experiments[0].status}' -n " + namespace run_shell(expStatusCmd) logs_cmd = f"kubectl logs --since=10s -l name={experiment} -n {namespace}" print(f"\n{bcolors.OKGREEN}//** Experiment Logs ({logs_cmd}) **//\n\n") try: while subprocess.check_output(expStatusCmd, shell=True).decode('unicode-escape') != "Execution Successful": os.system(logs_cmd) os.system("sleep 10") print(f"\n\n//** End of Experiment Logs **//{bcolors.ENDC}\n") run_shell(f"kubectl describe chaosresult {result_name}-{experiment} -n {namespace}") run_shell('rm temp.yaml') except: print_color("User has cancelled script execution.", bcolors.FAIL) sys.exit(2) status = 
subprocess.check_output("kubectl get chaosresult " + result_name + "-" + experiment + " -n " + namespace + " -o jsonpath='{.spec.experimentstatus.verdict}'", shell=True).decode('unicode-escape') return ExperimentResult(experiment, status, startTime) def test(args): experiments = sorted(os.listdir('./litmus')) experiment_results = [] if args.test == '*': print_color(f"Running all Litmus ChaosEngine Experiments with {args.wait} mins wait time between each one...") lstindex = len(experiments) for experiment_file in experiments: result = run_experiment(experiment_file.replace('.yaml', ''), args.delay) experiment_results.append(result) print_color(f"{datetime.now().strftime('%Y-%m-%d %H:%M:%S')} Waiting {args.wait} mins before running next experiment...", bcolors.WARNING) lstindex -= 1 if lstindex != 0: time.sleep(args.wait * 60) else: experiment_file = args.test + ".yaml" if experiment_file in experiments: result = run_experiment(args.test, args.delay) experiment_results.append(result) else: print_color(f"ERROR: {experiment_file} not found in ./litmus directory. Please check the name and try again.", bcolors.FAIL) sys.exit(2) print_color("***************************************************************************************************", bcolors.OKBLUE) print_color("* Experiments Result Summary", bcolors.OKBLUE) print_color("***************************************************************************************************\n", bcolors.OKBLUE) headers = ["#", "Start Time", "Experiment", "Status"] row_format = "{:>25}" * (len(headers) + 1) print_color(row_format.format("", *headers), bcolors.OKBLUE) i = 1 for result in experiment_results: print_color(row_format.format("", str(i), result.startTime.strftime('%Y-%m-%d %H:%M:%S'), result.name, result.status), bcolors.OKBLUE) i += 1 print("\n")
MIT License
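For illustration, a minimal sketch of how the `list` handler above could be wired into the script's command-line interface. The parser and subcommand names are assumptions for the example; the actual argparse wiring of manage.py is not part of this excerpt.

# Hypothetical wiring sketch -- parser and subcommand names are assumptions,
# not taken from manage.py; it assumes the `list(args)` handler above is in scope.
import argparse

parser = argparse.ArgumentParser(description="Zebrium Kubernetes demo manager")
subparsers = parser.add_subparsers(dest="command")

list_parser = subparsers.add_parser("list", help="List available Litmus chaos experiments")
list_parser.set_defaults(func=list)  # the `list(args)` handler shown above

args = parser.parse_args(["list"])
args.func(args)  # prints a numbered list of the .yaml files found under ./litmus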
onnxbot/onnx-fb-universe
test/verify.py
Errors.failWith
python
def failWith(self, msg):
    self.addErr(msg)
    self.fail()
Add an error to the error context, and then short-circuit.
https://github.com/onnxbot/onnx-fb-universe/blob/076e15d3d6d48c1ca792566bf9c23d07cb6910e1/test/verify.py#L163-L168
import torch import torch.jit import torch.onnx import onnx import onnx.helper import numpy as np import difflib import contextlib import io def colonize(msg, sep=": "): if not msg: return "" else: return msg + sep class Errors(object): def __init__(self, msg, rtol=1e-3, atol=1e-7): self.msg = msg self.errors = [] self.context = [] self.rtol = rtol self.atol = atol class ShortCircuit(Exception): pass self.exc_class = ShortCircuit def requireAlmostEqual(self, x, y, msg=None): self.almostEqualAndThen(x, y, msg, self.failWith) def checkAlmostEqual(self, x, y, msg=None): self.almostEqualAndThen(x, y, msg, self.addErr) def almostEqualAndThen(self, x, y, msg, k): if isinstance(x, np.ndarray) and isinstance(y, np.ndarray): try: np.testing.assert_allclose(x, y, rtol=self.rtol, atol=self.atol, equal_nan=False, verbose=True) except AssertionError as e: raise k("{}{}".format(colonize(msg), str(e).lstrip())) else: raise RuntimeError("Unsupported almost equal test") def requireEqual(self, x, y, msg=None): self.equalAndThen(x, y, msg, self.failWith) def checkEqual(self, x, y, msg=None): self.equalAndThen(x, y, msg, self.addErr) def equalAndThen(self, x, y, msg, k): if isinstance(x, onnx.TensorProto) and isinstance(y, onnx.TensorProto): self.equalAndThen(x.name, y.name, msg, k) t1 = onnx.numpy_helper.to_array(x) t2 = onnx.numpy_helper.to_array(y) new_msg = "{}In embedded parameter '{}'".format(colonize(msg), x.name) self.equalAndThen(t1, t2, new_msg, k) elif isinstance(x, np.ndarray) and isinstance(y, np.ndarray): try: np.testing.assert_equal(x, y) except AssertionError as e: raise k("{}{}".format(colonize(msg, ": "), str(e).lstrip())) else: if x != y: sx = str(x) sy = str(y) if len(sx) > 40 or len(sy) > 40 or '\n' in sx or '\n' in sy: l = "=" * 50 k("\n{}The value\n{}\n{}\n{}\n\ndoes not equal\n\n{}\n{}\n{}" .format(colonize(msg, ":\n"), l, sx, l, l, sy, l)) else: k("{}{} != {}".format(colonize(msg), sx, sy)) def requireMultiLineEqual(self, x, y, msg=None): self.multiLineEqualAndThen(x, y, msg, self.failWith) def multiLineEqualAndThen(self, x, y, msg, k): if msg is None: msg = "Strings are not equal" if x != y: diff = difflib.ndiff(x.splitlines(True), y.splitlines(True)) k("{}{}".format(colonize(msg, ":\n\n"), "".join(diff))) def addErr(self, msg): msg_w_ctx = msg for c in reversed(self.context): msg += "\n\n * " + "\n ".join(c.splitlines()) self.errors.append(msg) def fail(self): raise self.exc_class()
MIT License
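As a quick illustration of the short-circuit behaviour documented above, a minimal usage sketch; the message strings are invented for the example.

# Minimal usage sketch of Errors.failWith: the message is recorded and the
# per-instance ShortCircuit exception is raised immediately afterwards.
errs = Errors("verifying exported graph")
try:
    errs.failWith("shapes disagree")   # records the error, then short-circuits
except errs.exc_class:
    pass                               # caller catches the short-circuit

print(errs.errors)                     # ['shapes disagree']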
demisto/demisto-py
demisto_client/demisto_api/models/playbook.py
Playbook.name
python
def name(self, name): self._name = name
Sets the name of this Playbook. :param name: The name of this Playbook. # noqa: E501 :type: str
https://github.com/demisto/demisto-py/blob/95d29e07693d27c133f7fe6ef9da13e4b6dbf542/demisto_client/demisto_api/models/playbook.py#L581-L589
import pprint import re import six from demisto_client.demisto_api.models.playbook_inputs import PlaybookInputs from demisto_client.demisto_api.models.playbook_outputs import PlaybookOutputs from demisto_client.demisto_api.models.playbook_task import PlaybookTask from demisto_client.demisto_api.models.playbook_view import PlaybookView from demisto_client.demisto_api.models.version import Version class Playbook(object): """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'brands': 'list[str]', 'commands': 'list[str]', 'comment': 'str', 'commit_message': 'str', 'dbot_created_by': 'str', 'detached': 'bool', 'from_server_version': 'Version', 'has_role': 'bool', 'hidden': 'bool', 'id': 'str', 'inputs': 'PlaybookInputs', 'item_version': 'Version', 'locked': 'bool', 'missing_scripts_ids': 'list[str]', 'modified': 'datetime', 'name': 'str', 'name_raw': 'str', 'outputs': 'PlaybookOutputs', 'pack_id': 'str', 'prev_name': 'str', 'previous_roles': 'list[str]', 'primary_term': 'int', 'private': 'bool', 'propagation_labels': 'list[str]', 'quiet': 'bool', 'roles': 'list[str]', 'script_ids': 'list[str]', 'sequence_number': 'int', 'should_commit': 'bool', 'sort_values': 'list[str]', 'source_playbook_id': 'str', 'start_task_id': 'str', 'system': 'bool', 'tags': 'list[str]', 'task_ids': 'list[str]', 'tasks': 'dict(str, PlaybookTask)', 'to_server_version': 'Version', 'vc_should_ignore': 'bool', 'version': 'int', 'view': 'PlaybookView' } attribute_map = { 'brands': 'brands', 'commands': 'commands', 'comment': 'comment', 'commit_message': 'commitMessage', 'dbot_created_by': 'dbotCreatedBy', 'detached': 'detached', 'from_server_version': 'fromServerVersion', 'has_role': 'hasRole', 'hidden': 'hidden', 'id': 'id', 'inputs': 'inputs', 'item_version': 'itemVersion', 'locked': 'locked', 'missing_scripts_ids': 'missingScriptsIds', 'modified': 'modified', 'name': 'name', 'name_raw': 'nameRaw', 'outputs': 'outputs', 'pack_id': 'packID', 'prev_name': 'prevName', 'previous_roles': 'previousRoles', 'primary_term': 'primaryTerm', 'private': 'private', 'propagation_labels': 'propagationLabels', 'quiet': 'quiet', 'roles': 'roles', 'script_ids': 'scriptIds', 'sequence_number': 'sequenceNumber', 'should_commit': 'shouldCommit', 'sort_values': 'sortValues', 'source_playbook_id': 'sourcePlaybookID', 'start_task_id': 'startTaskId', 'system': 'system', 'tags': 'tags', 'task_ids': 'taskIds', 'tasks': 'tasks', 'to_server_version': 'toServerVersion', 'vc_should_ignore': 'vcShouldIgnore', 'version': 'version', 'view': 'view' } def __init__(self, brands=None, commands=None, comment=None, commit_message=None, dbot_created_by=None, detached=None, from_server_version=None, has_role=None, hidden=None, id=None, inputs=None, item_version=None, locked=None, missing_scripts_ids=None, modified=None, name=None, name_raw=None, outputs=None, pack_id=None, prev_name=None, previous_roles=None, primary_term=None, private=None, propagation_labels=None, quiet=None, roles=None, script_ids=None, sequence_number=None, should_commit=None, sort_values=None, source_playbook_id=None, start_task_id=None, system=None, tags=None, task_ids=None, tasks=None, to_server_version=None, vc_should_ignore=None, version=None, view=None): self._brands = None self._commands = None self._comment = None self._commit_message = None self._dbot_created_by = None self._detached = None self._from_server_version = None 
self._has_role = None self._hidden = None self._id = None self._inputs = None self._item_version = None self._locked = None self._missing_scripts_ids = None self._modified = None self._name = None self._name_raw = None self._outputs = None self._pack_id = None self._prev_name = None self._previous_roles = None self._primary_term = None self._private = None self._propagation_labels = None self._quiet = None self._roles = None self._script_ids = None self._sequence_number = None self._should_commit = None self._sort_values = None self._source_playbook_id = None self._start_task_id = None self._system = None self._tags = None self._task_ids = None self._tasks = None self._to_server_version = None self._vc_should_ignore = None self._version = None self._view = None self.discriminator = None if brands is not None: self.brands = brands if commands is not None: self.commands = commands if comment is not None: self.comment = comment if commit_message is not None: self.commit_message = commit_message if dbot_created_by is not None: self.dbot_created_by = dbot_created_by if detached is not None: self.detached = detached if from_server_version is not None: self.from_server_version = from_server_version if has_role is not None: self.has_role = has_role if hidden is not None: self.hidden = hidden if id is not None: self.id = id if inputs is not None: self.inputs = inputs if item_version is not None: self.item_version = item_version if locked is not None: self.locked = locked if missing_scripts_ids is not None: self.missing_scripts_ids = missing_scripts_ids if modified is not None: self.modified = modified if name is not None: self.name = name if name_raw is not None: self.name_raw = name_raw if outputs is not None: self.outputs = outputs if pack_id is not None: self.pack_id = pack_id if prev_name is not None: self.prev_name = prev_name if previous_roles is not None: self.previous_roles = previous_roles if primary_term is not None: self.primary_term = primary_term if private is not None: self.private = private if propagation_labels is not None: self.propagation_labels = propagation_labels if quiet is not None: self.quiet = quiet if roles is not None: self.roles = roles if script_ids is not None: self.script_ids = script_ids if sequence_number is not None: self.sequence_number = sequence_number if should_commit is not None: self.should_commit = should_commit if sort_values is not None: self.sort_values = sort_values if source_playbook_id is not None: self.source_playbook_id = source_playbook_id if start_task_id is not None: self.start_task_id = start_task_id if system is not None: self.system = system if tags is not None: self.tags = tags if task_ids is not None: self.task_ids = task_ids if tasks is not None: self.tasks = tasks if to_server_version is not None: self.to_server_version = to_server_version if vc_should_ignore is not None: self.vc_should_ignore = vc_should_ignore if version is not None: self.version = version if view is not None: self.view = view @property def brands(self): return self._brands @brands.setter def brands(self, brands): self._brands = brands @property def commands(self): return self._commands @commands.setter def commands(self, commands): self._commands = commands @property def comment(self): return self._comment @comment.setter def comment(self, comment): self._comment = comment @property def commit_message(self): return self._commit_message @commit_message.setter def commit_message(self, commit_message): self._commit_message = commit_message @property def 
dbot_created_by(self): return self._dbot_created_by @dbot_created_by.setter def dbot_created_by(self, dbot_created_by): self._dbot_created_by = dbot_created_by @property def detached(self): return self._detached @detached.setter def detached(self, detached): self._detached = detached @property def from_server_version(self): return self._from_server_version @from_server_version.setter def from_server_version(self, from_server_version): self._from_server_version = from_server_version @property def has_role(self): return self._has_role @has_role.setter def has_role(self, has_role): self._has_role = has_role @property def hidden(self): return self._hidden @hidden.setter def hidden(self, hidden): self._hidden = hidden @property def id(self): return self._id @id.setter def id(self, id): self._id = id @property def inputs(self): return self._inputs @inputs.setter def inputs(self, inputs): self._inputs = inputs @property def item_version(self): return self._item_version @item_version.setter def item_version(self, item_version): self._item_version = item_version @property def locked(self): return self._locked @locked.setter def locked(self, locked): self._locked = locked @property def missing_scripts_ids(self): return self._missing_scripts_ids @missing_scripts_ids.setter def missing_scripts_ids(self, missing_scripts_ids): self._missing_scripts_ids = missing_scripts_ids @property def modified(self): return self._modified @modified.setter def modified(self, modified): self._modified = modified @property def name(self): return self._name @name.setter
Apache License 2.0
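A trivial usage sketch of the generated getter/setter pair; the playbook names are placeholders.

from demisto_client.demisto_api.models.playbook import Playbook

pb = Playbook(name="Phishing - Triage")  # the constructor routes through the setter above
pb.name = "Phishing - Triage v2"         # explicit call to the setter
print(pb.name)                           # "Phishing - Triage v2"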
twilio/howtos
intercom/gdata/docs/__init__.py
DocumentListAclEntryFromString
python
def DocumentListAclEntryFromString(xml_string): return atom.CreateClassFromXMLString(DocumentListAclEntry, xml_string)
Converts an XML string into a DocumentListAclEntry object. Args: xml_string: string The XML describing a Document List ACL feed entry. Returns: A DocumentListAclEntry object corresponding to the given XML.
https://github.com/twilio/howtos/blob/718853f6a89252592d13592638c18f633e061b96/intercom/gdata/docs/__init__.py#L214-L223
__author__ = ('api.jfisher (Jeff Fisher), ' '[email protected] (Eric Bidelman)') import atom import gdata DOCUMENTS_NAMESPACE = 'http://schemas.google.com/docs/2007' class Scope(atom.AtomBase): _tag = 'scope' _namespace = gdata.GACL_NAMESPACE _children = atom.AtomBase._children.copy() _attributes = atom.AtomBase._attributes.copy() _attributes['value'] = 'value' _attributes['type'] = 'type' def __init__(self, value=None, type=None, extension_elements=None, extension_attributes=None, text=None): self.value = value self.type = type self.text = text self.extension_elements = extension_elements or [] self.extension_attributes = extension_attributes or {} class Role(atom.AtomBase): _tag = 'role' _namespace = gdata.GACL_NAMESPACE _children = atom.AtomBase._children.copy() _attributes = atom.AtomBase._attributes.copy() _attributes['value'] = 'value' def __init__(self, value=None, extension_elements=None, extension_attributes=None, text=None): self.value = value self.text = text self.extension_elements = extension_elements or [] self.extension_attributes = extension_attributes or {} class FeedLink(atom.AtomBase): _tag = 'feedLink' _namespace = gdata.GDATA_NAMESPACE _attributes = atom.AtomBase._attributes.copy() _attributes['rel'] = 'rel' _attributes['href'] = 'href' def __init__(self, href=None, rel=None, text=None, extension_elements=None, extension_attributes=None): self.href = href self.rel = rel atom.AtomBase.__init__(self, extension_elements=extension_elements, extension_attributes=extension_attributes, text=text) class ResourceId(atom.AtomBase): _tag = 'resourceId' _namespace = gdata.GDATA_NAMESPACE _children = atom.AtomBase._children.copy() _attributes = atom.AtomBase._attributes.copy() _attributes['value'] = 'value' def __init__(self, value=None, extension_elements=None, extension_attributes=None, text=None): self.value = value self.text = text self.extension_elements = extension_elements or [] self.extension_attributes = extension_attributes or {} class LastModifiedBy(atom.Person): _tag = 'lastModifiedBy' _namespace = gdata.GDATA_NAMESPACE class LastViewed(atom.Person): _tag = 'lastViewed' _namespace = gdata.GDATA_NAMESPACE class WritersCanInvite(atom.AtomBase): _tag = 'writersCanInvite' _namespace = DOCUMENTS_NAMESPACE _attributes = atom.AtomBase._attributes.copy() _attributes['value'] = 'value' class DocumentListEntry(gdata.GDataEntry): _tag = gdata.GDataEntry._tag _namespace = atom.ATOM_NAMESPACE _children = gdata.GDataEntry._children.copy() _attributes = gdata.GDataEntry._attributes.copy() _children['{%s}feedLink' % gdata.GDATA_NAMESPACE] = ('feedLink', FeedLink) _children['{%s}resourceId' % gdata.GDATA_NAMESPACE] = ('resourceId', ResourceId) _children['{%s}lastModifiedBy' % gdata.GDATA_NAMESPACE] = ('lastModifiedBy', LastModifiedBy) _children['{%s}lastViewed' % gdata.GDATA_NAMESPACE] = ('lastViewed', LastViewed) _children['{%s}writersCanInvite' % DOCUMENTS_NAMESPACE] = ( 'writersCanInvite', WritersCanInvite) def __init__(self, resourceId=None, feedLink=None, lastViewed=None, lastModifiedBy=None, writersCanInvite=None, author=None, category=None, content=None, atom_id=None, link=None, published=None, title=None, updated=None, text=None, extension_elements=None, extension_attributes=None): self.feedLink = feedLink self.lastViewed = lastViewed self.lastModifiedBy = lastModifiedBy self.resourceId = resourceId self.writersCanInvite = writersCanInvite gdata.GDataEntry.__init__( self, author=author, category=category, content=content, atom_id=atom_id, link=link, published=published, 
title=title, updated=updated, extension_elements=extension_elements, extension_attributes=extension_attributes, text=text) def GetAclLink(self): return self.feedLink def GetDocumentType(self): if self.category: for category in self.category: if category.scheme == gdata.GDATA_NAMESPACE + '#kind': return category.label else: return None def DocumentListEntryFromString(xml_string): return atom.CreateClassFromXMLString(DocumentListEntry, xml_string) class DocumentListAclEntry(gdata.GDataEntry): _tag = gdata.GDataEntry._tag _namespace = gdata.GDataEntry._namespace _children = gdata.GDataEntry._children.copy() _attributes = gdata.GDataEntry._attributes.copy() _children['{%s}scope' % gdata.GACL_NAMESPACE] = ('scope', Scope) _children['{%s}role' % gdata.GACL_NAMESPACE] = ('role', Role) def __init__(self, category=None, atom_id=None, link=None, title=None, updated=None, scope=None, role=None, extension_elements=None, extension_attributes=None, text=None): gdata.GDataEntry.__init__(self, author=None, category=category, content=None, atom_id=atom_id, link=link, published=None, title=title, updated=updated, text=None) self.scope = scope self.role = role
MIT License
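A small round-trip sketch, assuming an Atom entry carrying gAcl:role and gAcl:scope child elements. The XML fragment, the e-mail address, and the gAcl namespace URI are assumptions made for the example, not taken from the Documents List API reference.

# Sketch only: parse an illustrative ACL entry and read back its fields.
xml_string = (
    '<entry xmlns="http://www.w3.org/2005/Atom" '
    '       xmlns:gAcl="http://schemas.google.com/acl/2007">'
    '  <gAcl:role value="writer"/>'
    '  <gAcl:scope type="user" value="[email protected]"/>'
    '</entry>'
)

acl_entry = DocumentListAclEntryFromString(xml_string)
print(acl_entry.role.value)   # 'writer'
print(acl_entry.scope.type)   # 'user'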
aldebaran/qibuild
python/qitest/runner.py
TestSuiteRunner.launcher
python
def launcher(self): pass
This function should return a :py:class:`.TestLauncher`
https://github.com/aldebaran/qibuild/blob/efea6fa3744664348717fe5e8df708a3cf392072/python/qitest/runner.py#L44-L46
from __future__ import absolute_import from __future__ import unicode_literals from __future__ import print_function import re import os import json import abc import qitest.test_queue from qisys import ui class TestSuiteRunner(object): __metaclass__ = abc.ABCMeta def __init__(self, project): self.project = project self._patterns = list() self._excludes = list() self.num_jobs = 1 self.repeat_until_fail = 0 self.cwd = os.getcwd() self.env = None self.verbose = False self.perf = False self.nightly = False self.coverage = False self.nightmare = False self.test_output_dir = None self.capture = True self.last_failed = False self._tests = project.tests @abc.abstractproperty
BSD 3-Clause New or Revised License
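Because `launcher` is an abstract property, concrete runners are expected to override it. A hypothetical subclass sketch follows; neither class name exists in qibuild, they are invented for illustration.

# Sketch only: stand-ins showing how the abstract property is satisfied.
class DummyLauncher(object):
    """Stand-in for the TestLauncher the property is documented to return."""
    def launch(self, test):
        pass  # a real launcher would start the test process here

class DummyTestSuiteRunner(TestSuiteRunner):
    @property
    def launcher(self):
        return DummyLauncher()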
burnash/gspread
gspread/worksheet.py
Worksheet.row_values
python
def row_values(self, row, **kwargs):
    try:
        data = self.get("A{}:{}".format(row, row), **kwargs)
        return data[0] if data else []
    except KeyError:
        return []
Returns a list of all values in a `row`. Empty cells in this list will be rendered as :const:`None`. :param int row: Row number (one-based). :param str value_render_option: (optional) Determines how values should be rendered in the output. See `ValueRenderOption`_ in the Sheets API. .. _ValueRenderOption: https://developers.google.com/sheets/api/reference/rest/v4/ValueRenderOption
https://github.com/burnash/gspread/blob/90a728fac1c8f6fb38f19da588de0337697854cc/gspread/worksheet.py#L414-L430
from .cell import Cell from .urls import SPREADSHEET_URL, WORKSHEET_DRIVE_URL from .utils import ( a1_range_to_grid_range, a1_to_rowcol, absolute_range_name, accepted_kwargs, cast_to_a1_notation, cell_list_to_rect, fill_gaps, filter_dict_values, finditem, is_scalar, numericise_all, rowcol_to_a1, ) class ValueRange(list): @classmethod def from_json(cls, json): values = json.get("values", []) new_obj = cls(values) new_obj._json = { "range": json["range"], "majorDimension": json["majorDimension"], } return new_obj @property def range(self): return self._json["range"] @property def major_dimension(self): return self._json["majorDimension"] def first(self, default=None): try: return self[0][0] except IndexError: return default class Worksheet: def __init__(self, spreadsheet, properties): self.spreadsheet = spreadsheet self.client = spreadsheet.client self._properties = properties def __repr__(self): return "<{} {} id:{}>".format( self.__class__.__name__, repr(self.title), self.id, ) @property def id(self): return self._properties["sheetId"] @property def title(self): return self._properties["title"] @property def url(self): return WORKSHEET_DRIVE_URL % (self.spreadsheet.id, self.id) @property def index(self): return self._properties["index"] @property def updated(self): import warnings warnings.warn( "Worksheet.updated() is deprecated, " "this feature is not supported in Sheets API v4", DeprecationWarning, stacklevel=2, ) @property def row_count(self): return self._properties["gridProperties"]["rowCount"] @property def col_count(self): return self._properties["gridProperties"]["columnCount"] @property def frozen_row_count(self): return self._properties["gridProperties"].get("frozenRowCount", 0) @property def frozen_col_count(self): return self._properties["gridProperties"].get("frozenColumnCount", 0) def acell(self, label, value_render_option="FORMATTED_VALUE"): return self.cell( *(a1_to_rowcol(label)), value_render_option=value_render_option ) def cell(self, row, col, value_render_option="FORMATTED_VALUE"): try: data = self.get( rowcol_to_a1(row, col), value_render_option=value_render_option ) value = data.first() except KeyError: value = "" return Cell(row, col, value) @cast_to_a1_notation def range(self, name): range_label = absolute_range_name(self.title, name) data = self.spreadsheet.values_get(range_label) if ":" not in name: name = data.get("range", "") if "!" 
in name: name = name.split("!")[1] grid_range = a1_range_to_grid_range(name) values = data.get("values", []) row_offset = grid_range.get("startRowIndex", 0) column_offset = grid_range.get("startColumnIndex", 0) last_row = grid_range.get("endRowIndex", None) last_column = grid_range.get("endColumnIndex", None) if last_row is not None: last_row -= row_offset if last_column is not None: last_column -= column_offset rect_values = fill_gaps( values, rows=last_row, cols=last_column, ) return [ Cell(row=i + row_offset + 1, col=j + column_offset + 1, value=value) for i, row in enumerate(rect_values) for j, value in enumerate(row) ] @accepted_kwargs( major_dimension=None, value_render_option=None, date_time_render_option=None, ) def get_values(self, range_name=None, **kwargs): try: return fill_gaps(self.get(range_name, **kwargs)) except KeyError: return [] @accepted_kwargs( major_dimension=None, value_render_option=None, date_time_render_option=None, ) def get_all_values(self, **kwargs): return self.get_values(**kwargs) def get_all_records( self, empty2zero=False, head=1, default_blank="", allow_underscores_in_numeric_literals=False, numericise_ignore=None, value_render_option=None, ): idx = head - 1 data = self.get_all_values(value_render_option=value_render_option) if len(data) <= idx: return [] keys = data[idx] if numericise_ignore == ["all"]: values = data[idx + 1 :] else: values = [ numericise_all( row, empty2zero, default_blank, allow_underscores_in_numeric_literals, numericise_ignore, ) for row in data[idx + 1 :] ] return [dict(zip(keys, row)) for row in values] @accepted_kwargs( major_dimension=None, value_render_option=None, date_time_render_option=None, )
MIT License
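A short usage sketch of `row_values`; the authentication helper and spreadsheet title below are placeholders, not taken from the library's documentation.

import gspread

gc = gspread.service_account()            # placeholder auth; any authorized client works
ws = gc.open("My Spreadsheet").sheet1

print(ws.row_values(1))                                            # formatted values
print(ws.row_values(1, value_render_option="UNFORMATTED_VALUE"))   # raw values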
reliaqualassociates/ramstk
src/ramstk/views/gtk3/fmea/view.py
FMEAWorkView._do_request_insert_child
python
def _do_request_insert_child(self, __button: Gtk.ToolButton) -> None:
    _model, _row = self._pnlPanel.tvwTreeView.get_selection().get_selected()

    try:
        _parent_id = _model.get_value(_row, 0)
        _level = {
            1: "mechanism",
            2: "cause",
            3: "control_action",
        }[len(str(_parent_id).split("."))]
    except TypeError:
        _parent_id = "0"
        _level = "mechanism"

    if _level == "control_action":
        _level = self.__on_request_insert_control_action()

    super().do_set_cursor_busy()

    do_request_insert(_level, _parent_id)
Request to insert a new entity to the FMEA. :return: None :rtype: None
https://github.com/reliaqualassociates/ramstk/blob/ffec5a107424914cf0026c6dfe26369c221f79f9/src/ramstk/views/gtk3/fmea/view.py#L219-L246
from typing import Any, Dict, List from pubsub import pub from ramstk.configuration import ( RAMSTK_CONTROL_TYPES, RAMSTK_CRITICALITY, RAMSTK_FAILURE_PROBABILITY, RAMSTKUserConfiguration, ) from ramstk.logger import RAMSTKLogManager from ramstk.views.gtk3 import Gtk, _ from ramstk.views.gtk3.assistants import AddControlAction from ramstk.views.gtk3.widgets import RAMSTKMessageDialog, RAMSTKPanel, RAMSTKWorkView from . import FMEAMethodPanel, FMEATreePanel def do_request_insert(level: str, parent_id: str) -> None: if level == "mode": pub.sendMessage("request_insert_fmea_mode") elif level == "mechanism": pub.sendMessage("request_insert_fmea_mechanism", mode_id=str(parent_id)) elif level == "cause": pub.sendMessage("request_insert_fmea_cause", parent_id=str(parent_id)) elif level in ["control", "action"]: pub.sendMessage(f"request_insert_fmea_{level}", parent_id=str(parent_id)) class FMEAWorkView(RAMSTKWorkView): _tag: str = "fmea" _pixbuf: bool = True _tablabel: str = _("FMEA") _tabtooltip: str = _( "Displays failure mode and effects analysis (FMEA) information for the " "selected Hardware item." ) def __init__( self, configuration: RAMSTKUserConfiguration, logger: RAMSTKLogManager ) -> None: super().__init__(configuration, logger) self._lst_callbacks.insert(0, self._do_request_insert_sibling) self._lst_callbacks.insert(1, self._do_request_insert_child) self._lst_callbacks.insert(2, self._do_request_delete) self._lst_callbacks.insert(3, self._do_request_calculate) self._lst_icons.insert(0, "insert_sibling") self._lst_icons.insert(1, "insert_child") self._lst_icons.insert(2, "remove") self._lst_icons.insert(3, "calculate") self._lst_mnu_labels.insert(0, _("Add Sibling")) self._lst_mnu_labels.insert(1, _("Add Child")) self._lst_mnu_labels.insert(2, _("Delete Selected")) self._lst_mnu_labels.insert(3, _("Calculate FMEA")) self._lst_tooltips: List[str] = [ _( "Add a new (D)FME(C)A entity at the same level as the " "currently selected entity." ), _( "Add a new (D)FME(C)A entity one level below the currently " "selected entity." ), _("Delete the selected entity from the (D)FME(C)A."), _( "Calculate the Task 102 criticality and/or risk priority " "number (RPN)." ), _("Save changes to the selected entity in the (D)FME(C)A."), _("Save changes to all entities in the (D)FME(C)A."), ] self._item_hazard_rate: float = 0.0 self._pnlMethods: RAMSTKPanel = FMEAMethodPanel() self._pnlPanel: RAMSTKPanel = FMEATreePanel() self.__make_ui() pub.subscribe(self._do_set_record_id, "selected_fmea") pub.subscribe( self._on_get_hardware_attributes, "succeed_get_all_hardware_attributes" ) def _do_request_calculate(self, __button: Gtk.ToolButton) -> None: if self._pnlMethods.chkCriticality.get_active(): pub.sendMessage( "request_calculate_criticality", item_hr=self._item_hazard_rate ) if self._pnlMethods.chkRPN.get_active(): pub.sendMessage("request_calculate_rpn", method="mechanism") def _do_request_delete(self, __button: Gtk.ToolButton) -> None: _parent = self.get_parent().get_parent().get_parent().get_parent() _model, _row = self._pnlPanel.tvwTreeView.get_selection().get_selected() _node_id = _model.get_value(_row, 0) _prompt = _( "You are about to delete {1} item {0} and all " "data associated with it. Is this really what " "you want to do?" 
).format(_node_id, self._tag.title()) _dialog = RAMSTKMessageDialog(parent=_parent) _dialog.do_set_message(_prompt) _dialog.do_set_message_type("question") if _dialog.do_run() == Gtk.ResponseType.YES: super().do_set_cursor_busy() pub.sendMessage( "request_delete_fmea", node_id=_node_id, ) _dialog.do_destroy()
BSD 3-Clause New or Revised License
barrust/pyprobables
probables/cuckoo/countingcuckoo.py
CountingCuckooBin.__repr__
python
def __repr__(self): return self.__str__()
how do we represent this?
https://github.com/barrust/pyprobables/blob/f348fb878cdfbe6c1d997be093c073d26f9b05aa/probables/cuckoo/countingcuckoo.py#L274-L276
import os import random from struct import calcsize, pack, unpack from ..exceptions import CuckooFilterFullError from .cuckoo import CuckooFilter class CountingCuckooFilter(CuckooFilter): __slots__ = [ "__unique_elements", "_inserted_elements", "_bucket_size", "__max_cuckoo_swaps", "_cuckoo_capacity", "_buckets", ] def __init__( self, capacity=10000, bucket_size=4, max_swaps=500, expansion_rate=2, auto_expand=True, finger_size=4, filepath=None, hash_function=None, ): self.__unique_elements = 0 super(CountingCuckooFilter, self).__init__( capacity, bucket_size, max_swaps, expansion_rate, auto_expand, finger_size, filepath, hash_function, ) def __contains__(self, val): if self.check(val) > 0: return True return False @property def unique_elements(self): return self.__unique_elements def load_factor(self): return self.unique_elements / (self.capacity * self.bucket_size) def add(self, key): idx_1, idx_2, fingerprint = self._generate_fingerprint_info(key) is_present = self._check_if_present(idx_1, idx_2, fingerprint) if is_present is not None: for bucket in self.buckets[is_present]: if fingerprint in bucket: bucket.increment() self._inserted_elements += 1 return finger = self._insert_fingerprint_alt(fingerprint, idx_1, idx_2) self._deal_with_insertion(finger) def check(self, key): idx_1, idx_2, fingerprint = self._generate_fingerprint_info(key) is_present = self._check_if_present(idx_1, idx_2, fingerprint) if is_present is not None: for bucket in self.buckets[is_present]: if fingerprint in bucket: return bucket.count return 0 def remove(self, key): idx_1, idx_2, fingerprint = self._generate_fingerprint_info(key) idx = self._check_if_present(idx_1, idx_2, fingerprint) if idx is None: return False for bucket in self.buckets[idx]: if fingerprint in bucket: bucket.decrement() self._inserted_elements -= 1 if bucket.count == 0: self.buckets[idx].remove(bucket) self.__unique_elements -= 1 return True return False def expand(self): self._expand_logic(None) def export(self, filename): with open(filename, "wb") as filepointer: for bucket in self.buckets: rep = len(bucket) * "II" wbyt = pack(rep, *[x for x in self.__bucket_decomposition(bucket)]) filepointer.write(wbyt) leftover = self.bucket_size - len(bucket) rep = leftover * "II" filepointer.write(pack(rep, *([0] * (leftover * 2)))) filepointer.write(pack("II", self.bucket_size, self.max_swaps)) def _insert_fingerprint_alt(self, fingerprint, idx_1, idx_2, count=1): if self.__insert_element(fingerprint, idx_1, count): self._inserted_elements += 1 self.__unique_elements += 1 return None elif self.__insert_element(fingerprint, idx_2, count): self._inserted_elements += 1 self.__unique_elements += 1 return None idx = random.choice([idx_1, idx_2]) prv_bin = CountingCuckooBin(fingerprint, 1) for _ in range(self.max_swaps): swap_elm = random.randint(0, self.bucket_size - 1) swap_finger = self.buckets[idx][swap_elm] prv_bin, self.buckets[idx][swap_elm] = swap_finger, prv_bin index_1, index_2 = self._indicies_from_fingerprint(prv_bin.finger) idx = index_2 if idx == index_1 else index_1 if self.__insert_element(prv_bin.finger, idx, prv_bin.count): self._inserted_elements += 1 self.__unique_elements += 1 return None return prv_bin def _check_if_present(self, idx_1, idx_2, fingerprint): if fingerprint in [x.finger for x in self.buckets[idx_1]]: return idx_1 elif fingerprint in [x.finger for x in self.buckets[idx_2]]: return idx_2 return None def _load(self, filename): with open(filename, "rb") as filepointer: offset = calcsize("II") int_size = calcsize("II") 
filepointer.seek(offset * -1, os.SEEK_END) list_size = filepointer.tell() mybytes = unpack("II", filepointer.read(offset)) self._bucket_size = mybytes[0] self.__max_cuckoo_swaps = mybytes[1] self._cuckoo_capacity = list_size // int_size // self.bucket_size self._inserted_elements = 0 filepointer.seek(0, os.SEEK_SET) self._buckets = list() for i in range(self.capacity): self.buckets.append(list()) for _ in range(self.bucket_size): finger, count = unpack("II", filepointer.read(int_size)) if finger > 0: ccb = CountingCuckooBin(finger, count) self.buckets[i].append(ccb) self._inserted_elements += count self.__unique_elements += 1 def _expand_logic(self, extra_fingerprint): fingerprints = self._setup_expand(extra_fingerprint) self.__unique_elements = 0 for elm in fingerprints: idx_1, idx_2 = self._indicies_from_fingerprint(elm.finger) res = self._insert_fingerprint_alt(elm.finger, idx_1, idx_2, elm.count) if res is not None: msg = "The CountingCuckooFilter failed to expand" raise CuckooFilterFullError(msg) def __insert_element(self, fingerprint, idx, count=1): if len(self.buckets[idx]) < self.bucket_size: self.buckets[idx].append(CountingCuckooBin(fingerprint, count)) return True return False @staticmethod def __bucket_decomposition(bucket): for buck in bucket: yield buck.finger yield buck.count class CountingCuckooBin(object): __slots__ = ["__fingerprint", "__count"] def __init__(self, fingerprint, count): self.__fingerprint = fingerprint self.__count = count def __contains__(self, val): return self.__fingerprint == val @property def finger(self): return self.__fingerprint @property def count(self): return self.__count
MIT License
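A brief usage sketch of the CountingCuckooFilter that these bins back; the inserted values are arbitrary examples. Since `__repr__` simply delegates to `__str__`, printing a bin yields the same readable form either way.

from probables import CountingCuckooFilter

cco = CountingCuckooFilter(capacity=100)
cco.add("python")
cco.add("python")            # second insert only increments the existing bin's count
print(cco.check("python"))   # 2
cco.remove("python")
print(cco.check("python"))   # 1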
theislab/diffxpy
diffxpy/testing/det.py
DifferentialExpressionTestLRT.locations
python
def locations(self):
    di = self.full_design_loc_info
    sample_description = self.sample_description[[f.name() for f in di.factor_infos]]
    dmat = self.full_estim.input_data.design_loc

    dmat, sample_description = dmat_unique(dmat, sample_description)

    retval = self.full_estim.model.inverse_link_loc(np.matmul(dmat, self.full_estim.model.a))
    retval = pd.DataFrame(retval, columns=self.full_estim.input_data.features)
    for col in sample_description:
        retval[col] = sample_description[col]
    retval = retval.set_index(list(sample_description.columns))

    return retval
Returns a pandas.DataFrame containing the locations for the different categories of the factors :return: pd.DataFrame
https://github.com/theislab/diffxpy/blob/b8c6ae0d7d957db72e41bc2e705c240348c66509/diffxpy/testing/det.py#L606-L626
import abc try: import anndata except ImportError: anndata = None import batchglm.api as glm import dask import logging import numpy as np import patsy import pandas as pd from random import sample import scipy.sparse import sparse from typing import Union, Dict, Tuple, List, Set from .utils import split_x, dmat_unique from ..stats import stats from . import correction logger = logging.getLogger("diffxpy") class _DifferentialExpressionTest(metaclass=abc.ABCMeta): def __init__(self): self._pval = None self._qval = None self._mean = None self._log_likelihood = None @property @abc.abstractmethod def gene_ids(self) -> np.ndarray: pass @property @abc.abstractmethod def x(self): pass @abc.abstractmethod def log_fold_change(self, base=np.e, **kwargs): pass def log2_fold_change(self, **kwargs): return self.log_fold_change(base=2, **kwargs) def log10_fold_change(self, **kwargs): return self.log_fold_change(base=10, **kwargs) def _test(self, **kwargs) -> np.ndarray: pass def _correction(self, method) -> np.ndarray: if np.all(np.isnan(self.pval)): return self.pval else: return correction.correct(pvals=self.pval, method=method) def _ave(self): pass @property def log_likelihood(self): if self._log_likelihood is None: self._log_likelihood = self._ll() return self._log_likelihood @property def mean(self): if self._mean is None: self._mean = self._ave() return self._mean @property def pval(self): if self._pval is None: self._pval = self._test().copy() return self._pval @property def qval(self, method="fdr_bh"): if self._qval is None: self._qval = self._correction(method=method).copy() return self._qval def log10_pval_clean(self, log10_threshold=-30): pvals = np.reshape(self.pval, -1).astype(dtype=np.float) pvals = np.clip( pvals, np.nextafter(0, 1), np.inf ) log10_pval_clean = np.log(pvals) / np.log(10) log10_pval_clean[np.isnan(log10_pval_clean)] = 1 log10_pval_clean = np.clip(log10_pval_clean, log10_threshold, 0, log10_pval_clean) return log10_pval_clean def log10_qval_clean(self, log10_threshold=-30): qvals = np.reshape(self.qval, -1).astype(dtype=np.float) qvals = np.clip( qvals, np.nextafter(0, 1), np.inf ) log10_qval_clean = np.log(qvals) / np.log(10) log10_qval_clean[np.isnan(log10_qval_clean)] = 1 log10_qval_clean = np.clip(log10_qval_clean, log10_threshold, 0, log10_qval_clean) return log10_qval_clean @abc.abstractmethod def summary(self, **kwargs) -> pd.DataFrame: pass def _threshold_summary( self, res: pd.DataFrame, qval_thres=None, fc_upper_thres=None, fc_lower_thres=None, mean_thres=None ) -> pd.DataFrame: assert fc_lower_thres > 0 if fc_lower_thres is not None else True, "supply positive fc_lower_thres" assert fc_upper_thres > 0 if fc_upper_thres is not None else True, "supply positive fc_upper_thres" if qval_thres is not None: qvals = res['qval'].values qval_include = np.logical_not(np.isnan(qvals)) qval_include[qval_include] = qvals[qval_include] <= qval_thres res = res.iloc[qval_include, :] if fc_upper_thres is not None and fc_lower_thres is None: res = res.iloc[res['log2fc'].values >= np.log(fc_upper_thres) / np.log(2), :] elif fc_upper_thres is None and fc_lower_thres is not None: res = res.iloc[res['log2fc'].values <= np.log(fc_lower_thres) / np.log(2), :] elif fc_upper_thres is not None and fc_lower_thres is not None: res = res.iloc[np.logical_or( res['log2fc'].values <= np.log(fc_lower_thres) / np.log(2), res['log2fc'].values >= np.log(fc_upper_thres) / np.log(2)), :] if mean_thres is not None: res = res.iloc[res['mean'].values >= mean_thres, :] return res def plot_volcano( self, 
corrected_pval=True, log10_p_threshold=-30, log2_fc_threshold=10, alpha=0.05, min_fc=1, size=20, highlight_ids: Union[List, Tuple] = (), highlight_size: float = 30, highlight_col: str = "red", show: bool = True, save: Union[str, None] = None, suffix: str = "_volcano.png", return_axs: bool = False ): import seaborn as sns import matplotlib.pyplot as plt plt.ioff() if corrected_pval: neg_log_pvals = - self.log10_qval_clean(log10_threshold=log10_p_threshold) else: neg_log_pvals = - self.log10_pval_clean(log10_threshold=log10_p_threshold) logfc = np.reshape(self.log2_fold_change(), -1) logfc = np.clip(logfc, -log2_fc_threshold, log2_fc_threshold, logfc) fig, ax = plt.subplots() is_significant = np.logical_and( neg_log_pvals >= - np.log(alpha) / np.log(10), np.abs(logfc) >= np.log(min_fc) / np.log(2) ) sns.scatterplot(y=neg_log_pvals, x=logfc, hue=is_significant, ax=ax, legend=False, s=size, palette={True: "orange", False: "black"}) highlight_ids_found = np.array([x in self.gene_ids for x in highlight_ids]) highlight_ids_clean = [highlight_ids[i] for i in np.where(highlight_ids_found)[0]] highlight_ids_not_found = [highlight_ids[i] for i in np.where(np.logical_not(highlight_ids_found))[0]] if len(highlight_ids_not_found) > 0: logger.warning("not all highlight_ids were found in data set: ", ", ".join(highlight_ids_not_found)) if len(highlight_ids_clean) > 0: neg_log_pvals_highlights = np.zeros([len(highlight_ids_clean)]) logfc_highlights = np.zeros([len(highlight_ids_clean)]) is_highlight = np.zeros([len(highlight_ids_clean)]) for i, id_i in enumerate(highlight_ids_clean): idx = np.where(self.gene_ids == id_i)[0] neg_log_pvals_highlights[i] = neg_log_pvals[idx] logfc_highlights[i] = logfc[idx] sns.scatterplot(y=neg_log_pvals_highlights, x=logfc_highlights, hue=is_highlight, ax=ax, legend=False, s=highlight_size, palette={0: highlight_col}) if corrected_pval: ax.set(xlabel="log2FC", ylabel='-log10(corrected p-value)') else: ax.set(xlabel="log2FC", ylabel='-log10(p-value)') if save is not None: plt.savefig(save + suffix) if show: plt.show() plt.close(fig) plt.ion() if return_axs: return ax else: return def plot_ma( self, corrected_pval=True, log2_fc_threshold=10, min_mean=1e-4, alpha=0.05, size=20, highlight_ids: Union[List, Tuple] = (), highlight_size: float = 30, highlight_col: str = "red", show: bool = True, save: Union[str, None] = None, suffix: str = "_ma_plot.png", return_axs: bool = False ): import seaborn as sns import matplotlib.pyplot as plt assert min_mean >= 0, "min_mean must be positive" plt.ioff() ave = np.log(np.clip( self.mean.astype(dtype=np.float), np.max(np.array([np.nextafter(0, 1), min_mean])), np.inf )) logfc = np.reshape(self.log2_fold_change(), -1) logfc = np.clip(logfc, -log2_fc_threshold, log2_fc_threshold, logfc) fig, ax = plt.subplots() if corrected_pval: pvals = self.pval pvals[np.isnan(pvals)] = 1 is_significant = pvals < alpha else: qvals = self.qval qvals[np.isnan(qvals)] = 1 is_significant = qvals < alpha sns.scatterplot(y=logfc, x=ave, hue=is_significant, ax=ax, legend=False, s=size, palette={True: "orange", False: "black"}) highlight_ids_found = np.array([x in self.gene_ids for x in highlight_ids]) highlight_ids_clean = [highlight_ids[i] for i in np.where(highlight_ids_found)[0]] highlight_ids_not_found = [highlight_ids[i] for i in np.where(np.logical_not(highlight_ids_found))[0]] if len(highlight_ids_not_found) > 0: logger.warning("not all highlight_ids were found in data set: ", ", ".join(highlight_ids_not_found)) if len(highlight_ids_clean) > 0: ave_highlights 
= np.zeros([len(highlight_ids_clean)]) logfc_highlights = np.zeros([len(highlight_ids_clean)]) is_highlight = np.zeros([len(highlight_ids_clean)]) for i, id_i in enumerate(highlight_ids_clean): idx = np.where(self.gene_ids == id_i)[0] ave_highlights[i] = ave[idx] logfc_highlights[i] = logfc[idx] sns.scatterplot(x=ave_highlights, y=logfc_highlights, hue=is_highlight, ax=ax, legend=False, s=highlight_size, palette={0: highlight_col}) ax.set(xlabel="log mean expression", ylabel="log2FC") if save is not None: plt.savefig(save + suffix) if show: plt.show() plt.close(fig) plt.ion() if return_axs: return ax else: return class _DifferentialExpressionTestSingle(_DifferentialExpressionTest, metaclass=abc.ABCMeta): def summary( self, qval_thres=None, fc_upper_thres=None, fc_lower_thres=None, mean_thres=None, **kwargs ) -> pd.DataFrame: assert self.gene_ids is not None res = pd.DataFrame({ "gene": self.gene_ids, "pval": self.pval, "qval": self.qval, "log2fc": self.log2_fold_change(), "mean": self.mean, "zero_mean": self.mean == 0 }) return res class DifferentialExpressionTestLRT(_DifferentialExpressionTestSingle): sample_description: pd.DataFrame full_design_loc_info: patsy.design_info full_estim: glm.typing.EstimatorBaseTyping reduced_design_loc_info: patsy.design_info reduced_estim: glm.typing.EstimatorBaseTyping def __init__( self, sample_description: pd.DataFrame, full_design_loc_info: patsy.design_info, full_estim: glm.typing.EstimatorBaseTyping, reduced_design_loc_info: patsy.design_info, reduced_estim: glm.typing.EstimatorBaseTyping ): super().__init__() self.sample_description = sample_description self.full_design_loc_info = full_design_loc_info self.full_estim = full_estim self.reduced_design_loc_info = reduced_design_loc_info self.reduced_estim = reduced_estim @property def gene_ids(self) -> np.ndarray: return np.asarray(self.full_estim.input_data.features) @property def x(self): return self.full_estim.x @property def reduced_model_gradient(self): return self.reduced_estim.jacobian @property def full_model_gradient(self): return self.full_estim.jacobian def _test(self): if np.any(self.full_estim.log_likelihood < self.reduced_estim.log_likelihood): logger.warning("Test assumption failed: full model is (partially) less probable than reduced model") return stats.likelihood_ratio_test( ll_full=self.full_estim.log_likelihood, ll_reduced=self.reduced_estim.log_likelihood, df_full=self.full_estim.input_data.constraints_loc.shape[1] + self.full_estim.input_data.constraints_scale.shape[1], df_reduced=self.reduced_estim.input_data.constraints_loc.shape[1] + self.reduced_estim.input_data.constraints_scale.shape[1], ) def _ave(self): return np.asarray(np.mean(self.full_estim.x, axis=0)).flatten() def _log_fold_change(self, factors: Union[Dict, Tuple, Set, List], base=np.e): if not (isinstance(factors, list) or isinstance(factors, tuple) or isinstance(factors, set)): factors = {factors} if not isinstance(factors, set): factors = set(factors) di = self.full_design_loc_info sample_description = self.sample_description[[f.name() for f in di.subset(factors).factor_infos]] dmat = self.full_estim.input_data.design_loc dmat, sample_description = dmat_unique(dmat, sample_description) cols = np.arange(len(di.column_names)) sel = np.concatenate([cols[di.slice(f)] for f in factors], axis=0) neg_sel = np.ones_like(cols).astype(bool) neg_sel[sel] = False dmat[:, neg_sel] = 0 dmat, sample_description = dmat_unique(dmat, sample_description) locations = self.full_estim.model.inverse_link_loc(np.matmul(dmat, 
self.full_estim.model.a)) locations = np.log(locations) / np.log(base) dist = np.expand_dims(locations, axis=0) dist = np.transpose(dist, [1, 0, 2]) - dist return dist def log_fold_change(self, base=np.e, return_type="vector"): factors = set(self.full_design_loc_info.term_names) - set(self.reduced_design_loc_info.term_names) if return_type == "dataframe": dists = self._log_fold_change(factors=factors, base=base) df = dists.to_dataframe("logFC") df = df.reset_index().drop(["minuend", "subtrahend"], axis=1, errors="ignore") return df elif return_type == "vector": if len(factors) > 1 or self.sample_description[list(factors)].drop_duplicates().shape[0] != 2: return None else: dists = self._log_fold_change(factors=factors, base=base) return dists[1, 0] else: dists = self._log_fold_change(factors=factors, base=base) return dists
BSD 3-Clause New or Revised License
lisa-lab/pylearn2
pylearn2/dataset_get/dataset-get.py
unpack_tarball
python
def unpack_tarball( tar_filename, dest_path ):
    if os.path.exists(tar_filename):
        if file_access_rights(dest_path, os.W_OK, check_above=False):
            try:
                this_tar_file = tarfile.open(tar_filename, "r:bz2")
            except Exception as e:
                raise IOError("[tar] cannot open '%s'" % tar_filename)
            else:
                try:
                    this_tar_file.extractall(dest_path)
                except Exception as e:
                    raise IOError("[tar] error while extracting '%s'" % tar_filename)
                else:
                    pass
        else:
            raise IOError("[tar] no right access to '%s'" % dest_path)
    else:
        raise IOError("'%s' not found" % tar_filename)
Unpacks a bzip2-compressed tarball to a destination directory. :param tar_filename: the bzip2-compressed tar file :param dest_path: the path where the tarball will be expanded :raises: IOError on missing file, missing write access, or extraction failure
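A minimal usage sketch for the function above (the file and directory names are hypothetical, chosen only for illustration; the module's own imports of os and tarfile are assumed):

    # Hypothetical paths -- any bzip2 tarball and writable destination will do.
    unpack_tarball("mnist.tar.bz2", "/home/user/.local/share/pylearn/dataset/")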
https://github.com/lisa-lab/pylearn2/blob/af81e5c362f0df4df85c3e54e23b2adeec026055/pylearn2/dataset_get/dataset-get.py#L543-L575
from __future__ import print_function __authors__ = "Steven Pigeon" __copyright__ = "(c) 2012, Université de Montréal" __contact__ = "Steven Pigeon: [email protected]" __version__ = "dataset-get 0.1" __licence__ = "BSD 3-Clause http://www.opensource.org/licenses/BSD-3-Clause " import logging import re,os,sys,shutil,time import warnings import urllib,urllib2 import tarfile import subprocess from theano.compat.six.moves import input logger = logging.getLogger(__name__) class package_info: def __init__(self, cf, name,ts,rs,src,whr): self.configuration_file=cf self.name=name self.timestamp=int(ts) self.readable_size=rs self.source=src self.where=whr dataset_sources="sources.lst" dataset_web="http://www.stevenpigeon.org/secret" dataset_conf_path="" dataset_data_path="" root_conf_path=None root_data_path=None user_conf_path=None user_data_path=None super_powers=False packages_sources={} installed_packages_list={} def local_path_as_url( filename ): return "file://"+urllib.pathname2url(os.path.abspath(filename)) def has_super_powers(): return os.geteuid()==0 def corename( filename ): f1=None f2=os.path.basename(filename) while f1 != f2: f1=f2 (f2,ext)=os.path.splitext(f1) return f2 def get_timestamp_from_url( url ): obj = urllib2.urlopen( url ) return time.strptime( obj.info()["Last-Modified"], "%a, %d %b %Y %H:%M:%S GMT") def download_from_url( url, filename=None, progress_hook=None ): (temp_filename, headers)=urllib.urlretrieve( url,filename,progress_hook ) return temp_filename def file_access_rights( filename, rights, check_above=False ): if os.path.exists(filename): return os.access(filename, rights) else: if check_above: return os.access(os.path.dirname(os.path.abspath(filename)), rights) else: return False def atomic_replace( src_filename, dst_filename ): def same_fs( filename_a, filename_b): stats_a = os.stat(filename_a) stats_b = os.stat(filename_b) return stats_a.st_dev == stats_b.st_dev; if os.path.exists(dst_filename) and not same_fs(src_filename,dst_filename): dst_path = os.path.dirname(os.path.abspath(dst_filename)) dst_temp_filename=os.tempnam(dst_path); shutil.copy(src_filename, dst_temp_filename) shutil.move(dst_temp_filename,dst_filename) else: shutil.move(src_filename, dst_filename) def set_defaults(): global dataset_conf_path, dataset_data_path, root_conf_path, root_data_path, user_conf_path, super_powers root_conf_path="/etc/pylearn/" root_data_path="/usr/share/pylearn/dataset/" user_conf_path=os.path.join(os.environ["HOME"],".local/share/pylearn/") user_data_path=os.path.join(os.environ["HOME"],".local/share/pylearn/dataset/") if has_super_powers(): dataset_conf_path=root_conf_path dataset_data_path=root_data_path super_powers=True else: dataset_conf_path=user_conf_path dataset_data_path=user_data_path super_powers=False if not os.path.exists(dataset_conf_path): os.makedirs(dataset_conf_path) if not os.path.exists(os.path.join(dataset_conf_path,dataset_sources)): atomic_update(os.path.join(dataset_web,dataset_sources), os.path.join(dataset_conf_path,dataset_sources), progress_bar) if not os.path.exists(dataset_data_path): os.makedirs(dataset_data_path) read_packages_sources() read_installed_packages_list(); def read_packages_sources(): def read_from_file(config_filename): global packages_sources try: f=open(config_filename,"r") except Exception as e: pass else: for line in f: t=line.rstrip().split(' ') packages_sources[t[0]]= this_package=package_info( config_filename, t[0], t[1], t[2], urllib.unquote(t[3]), None) if super_powers: 
read_from_file(os.path.join(dataset_conf_path,dataset_sources)) else: paths=[ os.path.join(root_conf_path,dataset_sources), os.path.join(user_conf_path,dataset_sources) ] try: paths+=[ os.path.join(x,dataset_sources) for x in re.split(":|;",os.environ["PYLEARN2_DATA_PATH"]) ] except Exception: pass for path in paths: read_from_file(path) if len(packages_sources)==0: raise RuntimeError( "[cf] fatal: could not find/read sources.lst (unexpected!)" ) def read_installed_packages_list(): def read_from_file(config_filename): global installed_packages_list try: installed_list_file=open(config_filename) except IOError: pass else: for line in installed_list_file: l=line.rstrip().split(' ') if l: installed_packages_list[l[0]]= this_package=package_info( config_filename, l[0], l[1], l[2], urllib.unquote(l[3]), urllib.unquote(l[4])) else: pass if super_powers: read_from_file(os.path.join(dataset_conf_path,"installed.lst")) else: paths=[ os.path.join(root_conf_path,"installed.lst"), os.path.join(user_conf_path,"installed.lst") ] try: paths+=[ os.path.join(x,"installed.lst") for x in re.split(":|;",os.environ["PYLEARN2_DATA_PATH"]) ] except Exception: pass for path in paths: read_from_file(path) if len(installed_packages_list)==0: logger.warning("[cf] no install.lst found " "(will be created on install/upgrade)") def write_installed_packages_list(): global installed_packages_list try: tmp=open(os.path.join(dataset_conf_path,"installed.lst.2"),"w") except IOError: raise RuntimeError("[cf] fatal: cannot create temp file") else: for package in installed_packages_list.values(): if package.where!=None and file_access_rights(os.path.join(package.where,package.name), os.F_OK | os.R_OK): print( " ".join(map(str,[ package.name, package.timestamp, package.readable_size, urllib.quote(package.source,"/:~"), urllib.quote(package.where,"/:~") ] )), file=tmp) atomic_replace(os.path.join(dataset_conf_path,"installed.lst.2"), os.path.join(dataset_conf_path,"installed.lst")) def atomic_update( remote_src, local_dst, hook=None ): global hook_download_filename try: remote_date = get_timestamp_from_url(remote_src); except IOError as e: raise IOError("[ts] %s %s" % (str(e),remote_src)) else: if os.path.exists(local_dst): try: local_date = get_timestamp_from_url(local_path_as_url(local_dst)) except Exception as e: raise IOError("[ts] %s %s" % (str(e),local_dst)) else: if (local_date<remote_date): if file_access_rights(local_dst,os.W_OK,check_above=True): try: hook_download_filename=remote_src temp_filename=download_from_url(remote_src, filename=None, progress_hook=hook) except Exception as e: raise IOError("[dl] %s %s" % (str(e),remote_src)) else: try: atomic_replace(temp_filename,local_dst) except Exception as e: raise IOError("[ac] %s %s --> %s" % (str(e),temp_filename,local_dst)) else: raise IOError("[rw] no write access to %s " % local_dst ) else: pass else: if file_access_rights(local_dst,os.W_OK,check_above=True): try: hook_download_filename=remote_src temp_filename=download_from_url(remote_src, filename=None, progress_hook=hook) except Exception as e: raise IOError("[dl] %s %s" % (str(e),remote_src)) else: try: atomic_replace(temp_filename,local_dst) except Exception as e: raise IOError("[ac] %s %s --> %s" % (str(e),temp_filename,local_dst)) else: raise IOError("[rw] no right access to %s" % local_dst)
BSD 3-Clause New or Revised License
ankush-me/synthtext
synthgen.py
get_text_placement_mask
python
def get_text_placement_mask(xyz, mask, plane, pad=2, viz=False):
    _, contour, hier = cv2.findContours(mask.copy().astype('uint8'),
                                        mode=cv2.RETR_CCOMP,
                                        method=cv2.CHAIN_APPROX_SIMPLE)
    contour = [np.squeeze(c).astype('float') for c in contour]
    H, W = mask.shape[:2]
    pts, pts_fp = [], []
    center = np.array([W, H]) / 2
    n_front = np.array([0.0, 0.0, -1.0])
    for i in xrange(len(contour)):
        cnt_ij = contour[i]
        xyz = su.DepthCamera.plane2xyz(center, cnt_ij, plane)
        R = su.rot3d(plane[:3], n_front)
        xyz = xyz.dot(R.T)
        pts_fp.append(xyz[:, :2])
        pts.append(cnt_ij)
    rect = cv2.minAreaRect(pts_fp[0].copy().astype('float32'))
    box = np.array(cv2.boxPoints(rect))
    R2d = su.unrotate2d(box.copy())
    box = np.vstack([box, box[0, :]])
    mu = np.median(pts_fp[0], axis=0)
    pts_tmp = (pts_fp[0] - mu[None, :]).dot(R2d.T) + mu[None, :]
    boxR = (box - mu[None, :]).dot(R2d.T) + mu[None, :]
    s = rescale_frontoparallel(pts_tmp, boxR, pts[0])
    boxR *= s
    for i in xrange(len(pts_fp)):
        pts_fp[i] = s * ((pts_fp[i] - mu[None, :]).dot(R2d.T) + mu[None, :])
    minxy = -np.min(boxR, axis=0) + pad // 2
    ROW = np.max(ssd.pdist(np.atleast_2d(boxR[:, 0]).T))
    COL = np.max(ssd.pdist(np.atleast_2d(boxR[:, 1]).T))
    place_mask = 255 * np.ones((int(np.ceil(COL)) + pad, int(np.ceil(ROW)) + pad), 'uint8')
    pts_fp_i32 = [(pts_fp[i] + minxy[None, :]).astype('int32') for i in xrange(len(pts_fp))]
    cv2.drawContours(place_mask, pts_fp_i32, -1, 0,
                     thickness=cv2.FILLED, lineType=8, hierarchy=hier)
    if not TextRegions.filter_rectified((~place_mask).astype('float') / 255):
        return
    H, _ = cv2.findHomography(pts[0].astype('float32').copy(),
                              pts_fp_i32[0].astype('float32').copy(), method=0)
    Hinv, _ = cv2.findHomography(pts_fp_i32[0].astype('float32').copy(),
                                 pts[0].astype('float32').copy(), method=0)
    if viz:
        plt.subplot(1, 2, 1)
        plt.imshow(mask)
        plt.subplot(1, 2, 2)
        plt.imshow(~place_mask)
        plt.hold(True)
        for i in xrange(len(pts_fp_i32)):
            plt.scatter(pts_fp_i32[i][:, 0], pts_fp_i32[i][:, 1],
                        edgecolors='none', facecolor='g', alpha=0.5)
        plt.show()
    return place_mask, H, Hinv
Returns a binary mask in which text can be placed. Also returns a homography from the original image to this rectified mask. XYZ : (HxWx3) image xyz coordinates. MASK : (HxW) non-zero pixels mark the object mask. PLANE : plane coefficients fitted to this region (taken from the DICT output of TextRegions.get_regions). PAD : number of pixels to pad the placement mask by.
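A minimal usage sketch (hypothetical inputs; assumes the module-level imports and a plane model produced by the TextRegions pipeline shown in the context below):

    # Hypothetical inputs: xyz is the HxWx3 back-projected coordinate image,
    # obj_mask marks one segmented region, plane holds its fitted coefficients.
    res = get_text_placement_mask(xyz, obj_mask, plane, pad=2, viz=False)
    if res is not None:
        place_mask, H, Hinv = res  # rectified placement mask + forward/inverse homographies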
https://github.com/ankush-me/synthtext/blob/5687aa78ddf8714fc01ef8c043dd40af1cd09115/synthgen.py#L207-L286
from __future__ import division import copy import cv2 import h5py from PIL import Image import numpy as np import matplotlib.pyplot as plt import os.path as osp import scipy.ndimage as sim import scipy.spatial.distance as ssd import synth_utils as su import text_utils as tu from colorize3_poisson import Colorize from common import * import traceback, itertools class TextRegions(object): minWidth = 30 minHeight = 30 minAspect = 0.3 maxAspect = 7 minArea = 100 pArea = 0.60 dist_thresh = 0.10 num_inlier = 90 ransac_fit_trials = 100 min_z_projection = 0.25 minW = 20 @staticmethod def filter_rectified(mask): wx = np.median(np.sum(mask,axis=0)) wy = np.median(np.sum(mask,axis=1)) return wx>TextRegions.minW and wy>TextRegions.minW @staticmethod def get_hw(pt,return_rot=False): pt = pt.copy() R = su.unrotate2d(pt) mu = np.median(pt,axis=0) pt = (pt-mu[None,:]).dot(R.T) + mu[None,:] h,w = np.max(pt,axis=0) - np.min(pt,axis=0) if return_rot: return h,w,R return h,w @staticmethod def filter(seg,area,label): good = label[area > TextRegions.minArea] area = area[area > TextRegions.minArea] filt,R = [],[] for idx,i in enumerate(good): mask = seg==i xs,ys = np.where(mask) coords = np.c_[xs,ys].astype('float32') rect = cv2.minAreaRect(coords) box = np.array(cv2.boxPoints(rect)) h,w,rot = TextRegions.get_hw(box,return_rot=True) f = (h > TextRegions.minHeight and w > TextRegions.minWidth and TextRegions.minAspect < w/h < TextRegions.maxAspect and area[idx]/w*h > TextRegions.pArea) filt.append(f) R.append(rot) filt = np.array(filt) area = area[filt] R = [R[i] for i in xrange(len(R)) if filt[i]] aidx = np.argsort(-area) good = good[filt][aidx] R = [R[i] for i in aidx] filter_info = {'label':good, 'rot':R, 'area': area[aidx]} return filter_info @staticmethod def sample_grid_neighbours(mask,nsample,step=3): if 2*step >= min(mask.shape[:2]): return y_m,x_m = np.where(mask) mask_idx = np.zeros_like(mask,'int32') for i in xrange(len(y_m)): mask_idx[y_m[i],x_m[i]] = i xp,xn = np.zeros_like(mask), np.zeros_like(mask) yp,yn = np.zeros_like(mask), np.zeros_like(mask) xp[:,:-2*step] = mask[:,2*step:] xn[:,2*step:] = mask[:,:-2*step] yp[:-2*step,:] = mask[2*step:,:] yn[2*step:,:] = mask[:-2*step,:] valid = mask&xp&xn&yp&yn ys,xs = np.where(valid) N = len(ys) if N==0: return nsample = min(nsample,N) idx = np.random.choice(N,nsample,replace=False) xs,ys = xs[idx],ys[idx] s = step X = np.transpose(np.c_[xs,xs+s,xs+s,xs-s,xs-s][:,:,None],(1,2,0)) Y = np.transpose(np.c_[ys,ys+s,ys-s,ys+s,ys-s][:,:,None],(1,2,0)) sample_idx = np.concatenate([Y,X],axis=1) mask_nn_idx = np.zeros((5,sample_idx.shape[-1]),'int32') for i in xrange(sample_idx.shape[-1]): mask_nn_idx[:,i] = mask_idx[sample_idx[:,:,i][:,0],sample_idx[:,:,i][:,1]] return mask_nn_idx @staticmethod def filter_depth(xyz,seg,regions): plane_info = {'label':[], 'coeff':[], 'support':[], 'rot':[], 'area':[]} for idx,l in enumerate(regions['label']): mask = seg==l pt_sample = TextRegions.sample_grid_neighbours(mask,TextRegions.ransac_fit_trials,step=3) if pt_sample is None: continue pt = xyz[mask] plane_model = su.isplanar(pt, pt_sample, TextRegions.dist_thresh, TextRegions.num_inlier, TextRegions.min_z_projection) if plane_model is not None: plane_coeff = plane_model[0] if np.abs(plane_coeff[2])>TextRegions.min_z_projection: plane_info['label'].append(l) plane_info['coeff'].append(plane_model[0]) plane_info['support'].append(plane_model[1]) plane_info['rot'].append(regions['rot'][idx]) plane_info['area'].append(regions['area'][idx]) return plane_info @staticmethod def 
get_regions(xyz,seg,area,label): regions = TextRegions.filter(seg,area,label) regions = TextRegions.filter_depth(xyz,seg,regions) return regions def rescale_frontoparallel(p_fp,box_fp,p_im): l1 = np.linalg.norm(box_fp[1,:]-box_fp[0,:]) l2 = np.linalg.norm(box_fp[1,:]-box_fp[2,:]) n0 = np.argmin(np.linalg.norm(p_fp-box_fp[0,:][None,:],axis=1)) n1 = np.argmin(np.linalg.norm(p_fp-box_fp[1,:][None,:],axis=1)) n2 = np.argmin(np.linalg.norm(p_fp-box_fp[2,:][None,:],axis=1)) lt1 = np.linalg.norm(p_im[n1,:]-p_im[n0,:]) lt2 = np.linalg.norm(p_im[n1,:]-p_im[n2,:]) s = max(lt1/l1,lt2/l2) if not np.isfinite(s): s = 1.0 return s
Apache License 2.0
opennetworkingfoundation/tapi
RI/flask_server/tapi_server/models/tapi_oam_get_oam_job.py
TapiOamGetOamJob.__init__
python
def __init__(self, output=None):
    self.openapi_types = {
        'output': TapiOamGetoamjobOutput
    }

    self.attribute_map = {
        'output': 'output'
    }

    self._output = output
TapiOamGetOamJob - a model defined in OpenAPI :param output: The output of this TapiOamGetOamJob. # noqa: E501 :type output: TapiOamGetoamjobOutput
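A minimal construction sketch (assumes the generated TapiOamGetoamjobOutput model from the same package; the payload itself is hypothetical):

    # Wrap a (hypothetical) RPC output payload in the generated model.
    job_output = TapiOamGetoamjobOutput()
    rpc_result = TapiOamGetOamJob(output=job_output)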
https://github.com/opennetworkingfoundation/tapi/blob/1f3fd9483d5674552c5a31206c97399c8c151897/RI/flask_server/tapi_server/models/tapi_oam_get_oam_job.py#L19-L33
from __future__ import absolute_import from datetime import date, datetime from typing import List, Dict from tapi_server.models.base_model_ import Model from tapi_server.models.tapi_oam_getoamjob_output import TapiOamGetoamjobOutput from tapi_server import util class TapiOamGetOamJob(Model):
Apache License 2.0
nipy/nilabels
nilabels/tools/caliber/volumes_and_values.py
get_volumes_per_label
python
def get_volumes_per_label(im_segm, labels, labels_names, tot_volume_prior=None, verbose=0):
    num_non_zero_voxels = get_total_num_nonzero_voxels(im_segm)
    vol_non_zero_voxels_mm3 = num_non_zero_voxels * one_voxel_volume(im_segm)
    if tot_volume_prior is None:
        tot_volume_prior = vol_non_zero_voxels_mm3
    if labels_names not in [None, 'all', 'tot']:
        if len(labels) != len(labels_names):
            raise IOError('Inconsistent labels - labels_names input.')
    if labels_names == 'all':
        labels_names = ['reg {}'.format(l) for l in labels]
    if labels_names == 'tot':
        labels_names = ['tot']
        non_zero_voxels = np.count_nonzero(im_segm.get_data())
        volumes = non_zero_voxels * one_voxel_volume(im_segm)
        vol_over_tot = volumes / float(tot_volume_prior)
        data_frame = pa.DataFrame({'Num voxels': pa.Series([non_zero_voxels], index=labels_names),
                                   'Volume': pa.Series([volumes], index=labels_names),
                                   'Vol over Tot': pa.Series([vol_over_tot], index=labels_names)})
        return data_frame
    else:
        non_zero_voxels_list = []
        volumes_list = []
        vol_over_tot_list = []
        for label_k in labels:
            all_places = np.zeros_like(im_segm.get_data(), dtype=np.bool)
            if isinstance(label_k, int):
                all_places += im_segm.get_data() == label_k
            else:
                for label_k_j in label_k:
                    all_places += im_segm.get_data() == label_k_j
            flat_volume_voxel = np.nan_to_num((all_places.astype(np.float64)).flatten())
            non_zero_voxels = np.count_nonzero(flat_volume_voxel)
            volumes = non_zero_voxels * one_voxel_volume(im_segm)
            vol_over_tot = volumes / float(tot_volume_prior)
            non_zero_voxels_list.append(non_zero_voxels)
            volumes_list.append(volumes)
            vol_over_tot_list.append(vol_over_tot)
        data_frame = pa.DataFrame({'Label': pa.Series(labels, index=labels_names),
                                   'Num voxels': pa.Series(non_zero_voxels_list, index=labels_names),
                                   'Volume': pa.Series(volumes_list, index=labels_names),
                                   'Vol over Tot': pa.Series(vol_over_tot_list, index=labels_names)})
        data_frame = data_frame.rename_axis('Region')
        data_frame = data_frame.reset_index()
        if verbose > 0:
            print(data_frame)
        return data_frame
Return a data-frame with a separate volume for each label. :param im_segm: nibabel segmentation :param labels: labels you want to measure, or 'all' for all of them, or 'tot' for the total of the non-zero labels. :param labels_names: list with the index names of the labels in the final data-frame, corresponding to the labels list. :param tot_volume_prior: factor by which the volumes are divided. :param verbose: > 0 prints the intermediate steps. :return: pandas DataFrame with one row per label (Num voxels, Volume, Vol over Tot)
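A minimal usage sketch (the segmentation file name and label names are hypothetical; nibabel is assumed to be available in addition to the module's own imports):

    import nibabel as nib

    # Hypothetical segmentation; labels 3 and 4 are pooled into a single region.
    im_segm = nib.load('segmentation.nii.gz')
    df = get_volumes_per_label(im_segm, labels=[1, 2, [3, 4]],
                               labels_names=['WM', 'GM', 'ventricles'], verbose=1)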
https://github.com/nipy/nilabels/blob/b065febc611eef638785651b4642d53bb61f1321/nilabels/tools/caliber/volumes_and_values.py#L84-L153
import numpy as np import pandas as pa from nilabels.tools.aux_methods.utils_nib import one_voxel_volume def get_total_num_nonzero_voxels(im_segm, list_labels_to_exclude=None): seg = np.copy(im_segm.get_data()) if list_labels_to_exclude is not None: for label_k in list_labels_to_exclude: places = seg != label_k seg = seg * places num_voxels = np.count_nonzero(seg) else: num_voxels = int(np.count_nonzero(im_segm.get_data())) return num_voxels def get_num_voxels_from_labels_list(im_segm, labels_list): num_voxels_per_label = np.zeros(len(labels_list)).astype(np.int64) for k, label_k in enumerate(labels_list): if isinstance(label_k, int): all_places = im_segm.get_data() == label_k num_voxels_per_label[k] = np.count_nonzero(np.nan_to_num(all_places)) elif isinstance(label_k, list): all_places = np.zeros_like(im_segm.get_data(), dtype=np.bool) for label_k_j in label_k: all_places += im_segm.get_data() == label_k_j num_voxels_per_label[k] = np.count_nonzero(np.nan_to_num(all_places)) else: raise IOError('Labels list must be like [1,2,[3,4]], where [3, 4] are considered as a single label.') return num_voxels_per_label def get_values_below_labels_list(im_segm, im_anat, labels_list): assert im_segm.shape == im_anat.shape values_below_each_label = [] for label_k in labels_list: if isinstance(label_k, int): coords = np.where(im_segm.get_data() == label_k) values_below_each_label.append(im_anat.get_data()[coords].flatten()) elif isinstance(label_k, list): vals = np.array([]) for label_k_j in label_k: coords = np.where(im_segm.get_data() == label_k_j) vals = np.concatenate((vals, im_anat.get_data()[coords].flatten()), axis=0) values_below_each_label.append(vals) else: raise IOError('Labels list must be like [1,2,[3,4]], where [3, 4] are considered as a single label.') return values_below_each_label
MIT License
scqubits/scqubits
scqubits/core/qubit_base.py
QuantumSystem.widget
python
def widget(self, params: Dict[str, Any] = None):
    init_params = params or self.get_initdata()
    init_params.pop("id_str", None)
    ui.create_widget(
        self.set_params, init_params, image_filename=self._image_filename
    )
Use ipywidgets to modify the parameters of the class instance.
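A minimal usage sketch (assumes a Jupyter notebook and a concrete subclass such as scqubits.Transmon; the parameter values are illustrative only):

    import scqubits as scq

    # Hypothetical transmon parameters; widget() renders ipywidgets controls
    # that feed back into set_params() on change.
    tmon = scq.Transmon(EJ=30.0, EC=1.2, ng=0.3, ncut=31)
    tmon.widget()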
https://github.com/scqubits/scqubits/blob/d8532a3b614e37b1e65b75000493ea2c25c05682/scqubits/core/qubit_base.py#L183-L189
import functools import inspect from abc import ABC, ABCMeta, abstractmethod from typing import ( TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Tuple, Union, overload, ) import matplotlib.pyplot as plt import numpy as np import scipy as sp from matplotlib.axes import Axes from matplotlib.figure import Figure from numpy import ndarray import scqubits.core.constants as constants import scqubits.core.descriptors as descriptors import scqubits.core.units as units import scqubits.settings as settings import scqubits.ui.qubit_widget as ui import scqubits.utils.plotting as plot from scqubits.core.central_dispatch import DispatchClient from scqubits.core.discretization import Grid1d from scqubits.core.storage import DataStore, SpectrumData from scqubits.settings import IN_IPYTHON from scqubits.utils.cpu_switch import get_map_method from scqubits.utils.misc import InfoBar, process_which from scqubits.utils.spectrum_utils import ( get_matrixelement_table, order_eigensystem, recast_esys_mapdata, standardize_sign, ) if IN_IPYTHON: from tqdm.notebook import tqdm else: from tqdm import tqdm if TYPE_CHECKING: from scqubits.core.storage import WaveFunction from typing_extensions import Literal LevelsTuple = Tuple[int, ...] Transition = Tuple[int, int] TransitionsTuple = Tuple[Transition, ...] class QuantumSystem(DispatchClient, ABC): truncated_dim = descriptors.WatchedProperty(int, "QUANTUMSYSTEM_UPDATE") _init_params: List[str] _image_filename: str _evec_dtype: type _sys_type: str _quantumsystem_counter: int = 0 _instance_counter: Dict[str, int] = {} subclasses: List[ABCMeta] = [] def __new__(cls, *args, **kwargs) -> "QuantumSystem": QuantumSystem._quantumsystem_counter += 1 if cls.__name__ not in QuantumSystem._instance_counter: QuantumSystem._instance_counter[cls.__name__] = 1 else: QuantumSystem._instance_counter[cls.__name__] += 1 return super().__new__(cls) def __del__(self) -> None: try: QuantumSystem._quantumsystem_counter -= 1 except (NameError, AttributeError): pass def __init__(self, id_str: Union[str, None]): self._sys_type = type(self).__name__ self._id_str = id_str or self._autogenerate_id_str() def __init_subclass__(cls): super().__init_subclass__() if not inspect.isabstract(cls): cls.subclasses.append(cls) def __repr__(self) -> str: if hasattr(self, "_init_params"): init_names = self._init_params else: init_names = list(inspect.signature(self.__init__).parameters.keys())[1:] init_dict = {name: getattr(self, name) for name in init_names} return type(self).__name__ + f"(**{init_dict!r})" def __str__(self) -> str: indent_length = 20 name_prepend = self._sys_type.ljust(indent_length, "-") + "| [{}]\n".format( self._id_str ) output = "" for param_name in self.default_params().keys(): output += "{0}| {1}: {2}\n".format( " " * indent_length, str(param_name), str(getattr(self, param_name)) ) output += "{0}|\n".format(" " * indent_length) output += "{0}| dim: {1}\n".format(" " * indent_length, str(self.hilbertdim())) return name_prepend + output def __eq__(self, other: Any): if not isinstance(other, type(self)): return False return self.__dict__ == other.__dict__ def __hash__(self): return super().__hash__() def _autogenerate_id_str(self): name = self._sys_type return "{}_{}".format(name, QuantumSystem._instance_counter[name]) @property def id_str(self): return self._id_str def get_initdata(self) -> Dict[str, Any]: return {name: getattr(self, name) for name in self._init_params} @abstractmethod def hilbertdim(self) -> int: @classmethod def create(cls) -> "QuantumSystem": init_params = 
cls.default_params() instance = cls(**init_params) instance.widget() return instance
BSD 3-Clause New or Revised License
gepd/deviot
libraries/preferences_bridge.py
PreferencesBridge.get_selected_boards
python
def get_selected_boards(self):
    settings = get_setting('boards', [])
    boards = self.get_envs_initialized()

    if(boards):
        settings.extend(boards)

    if(settings):
        settings = list(set(settings))

    return settings
Get board(s). List of all boards in the project; it includes the boards selected in Deviot and the ones initialized in the platformio.ini file, merged with duplicates removed. Returns: list -- list of boards
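A minimal usage sketch (only meaningful inside the Sublime Text / Deviot plugin environment, since the result depends on saved settings and the project's platformio.ini; the board ids are hypothetical):

    bridge = PreferencesBridge()
    boards = bridge.get_selected_boards()  # e.g. ['uno', 'nodemcuv2'], duplicates removed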
https://github.com/gepd/deviot/blob/150caea06108369b30210eb287a580fcff4904af/libraries/preferences_bridge.py#L60-L79
from __future__ import absolute_import from __future__ import print_function from __future__ import division from __future__ import unicode_literals from ..api import deviot from .tools import get_setting, save_setting from ..platformio.pio_bridge import PioBridge from ..libraries.readconfig import ReadConfig logger = deviot.create_logger('Deviot') class PreferencesBridge(PioBridge): COMPILE = 1 UPLOAD = 2 def __init__(self): super(PreferencesBridge, self).__init__() deviot.set_logger_level() self.init_option = None def save_selected_board(self, board_id): settings = get_setting('boards', []) save_flag = True if(not settings): settings.append(board_id) else: if(board_id not in settings): settings.append(board_id) else: settings.remove(board_id) try: self.remove_ini_environment(board_id) except: pass if(len(settings) > 0): board_id = settings[-1] else: board_id = '' save_setting('boards', settings) self.save_environment(board_id)
Apache License 2.0