diff --git a/RASP_support/DrawCompFlow.py b/RASP_support/DrawCompFlow.py
index ff2431b..58a8f72 100644
--- a/RASP_support/DrawCompFlow.py
+++ b/RASP_support/DrawCompFlow.py
@@ -75,7 +75,7 @@ def makeQKStable(qvars, kvars, select, ref_in_g):
 	# select has qvars along the rows and kvars along the columns, so we'll do
 	# the same. i.e. top rows will just be the kvars and first columns will
 	# just be the qvars.
-	# if (not qvars) and (not kvars):
+	# if (not qvars) and (not kvars):
 	# # no qvars or kvars -> full select -> dont waste space drawing.
 	# num_rows, num_columns = 0, 0
 	# pass
@@ -582,7 +582,7 @@ def draw_comp_flow(self, w, filename=None,
 	# (though it will not be able to draw computation flows without it)
 	from graphviz import Digraph
 	g = Digraph('g')
-	# with curved lines it fusses over separating score edges
+	# with curved lines it fusses over separating score edges
 	# and makes weirdly curved ones that start overlapping with the sequences
 	# :(
 	g.attr(splines='polyline')
diff --git a/RASP_support/Environment.py b/RASP_support/Environment.py
index 19a1a4b..e0de03b 100644
--- a/RASP_support/Environment.py
+++ b/RASP_support/Environment.py
@@ -46,7 +46,7 @@ def carefulcopy(val):
 	if isinstance(val, Unfinished) or isinstance(val, RASPFunction):
 		return val # non mutable, at least not through rasp commands
 	elif isinstance(val, float) or isinstance(val, int) \
-			or isinstance(val, str) or isinstance(val, bool):
+			or isinstance(val, str) or isinstance(val, bool):
 		return val # non mutable
 	elif isinstance(val, list):
 		return [carefulcopy(v) for v in val]
diff --git a/RASP_support/Evaluator.py b/RASP_support/Evaluator.py
index 0f20bd1..76fa984 100644
--- a/RASP_support/Evaluator.py
+++ b/RASP_support/Evaluator.py
@@ -202,7 +202,7 @@ def _set_iterator_and_vals(self, iterator_names, iterator_vals):
 		if len(iterator_names) == 1:
 			self.env.set_variable(iterator_names[0], iterator_vals)
 		elif isinstance(iterator_vals, Iterable) \
-				and (len(iterator_vals) == len(iterator_names)):
+				and (len(iterator_vals) == len(iterator_names)):
 			for n, v in zip(iterator_names, iterator_vals):
 				self.env.set_variable(n, v)
 		else:
@@ -248,7 +248,7 @@ def _evaluateListComp(self, ast):
 		for vals in ll:
 			orig_env = self.env
 			self.env = self.env.make_nested()
-			# sets inside the now-nested env -don't want to keep
+			# sets inside the now-nested env -don't want to keep
 			# the internal iterators after finishing this list comp
 			self._set_iterator_and_vals(iterator_names, vals)
 			res.append(self.evaluateExpr(ast.val))
@@ -630,7 +630,7 @@ def _test_res(self, res):
 		def succeeds_with(exampe):
 			try:
 				res(example, just_pass_exception_up=True)
-			except:
+			except Exception:
 				return False
 			else:
 				return True
diff --git a/RASP_support/FunctionalSupport.py b/RASP_support/FunctionalSupport.py
index c287d5e..dd513c8 100644
--- a/RASP_support/FunctionalSupport.py
+++ b/RASP_support/FunctionalSupport.py
@@ -90,7 +90,7 @@ def get_parents(self):
 		for p in other_parents:
 			# recursion: branch back through all the parents of the unf,
 			# always stopping wherever hit something 'real' ie a select or
-			# a sequence
+			# a sequence
 			res += p.get_parents()
 		# nothing is made from more than one select...
 		assert len(
@@ -147,7 +147,7 @@ def get_full_parents(self, recurse=False, just_compute=False,
 			for p in self.get_sorted_full_parents():
 				p.get_full_parents(recurse=True, just_compute=True)
 				# have them all compute their full parents so they are
-				# ready for the future, but only do this in sorted order,
+				# ready for the future, but only do this in sorted order,
 				# so recursion is always shallow. (always gets shorted with
 				# self._full_parents, which is being computed here for each
 				# unfinished starting from the top of the computation
@@ -277,12 +277,12 @@ def __init__(self, parents_tuple, parents2self,
 				 from_zipmap=False, output_index=-1,
 				 definitely_uses_identity_function=False):
 		# min_poss_depth=0 starts all of the base sequences (eg indices) off
-		# right.
+		# right.
 
 		# might have got none from some default value, fix it before continuing
 		# because later things eg DrawCompFlow will expect name to be str
 		if name is None:
-			name = plain_unfinished_sequence_name
+			name = plain_unfinished_sequence_name
 		super(UnfinishedSequence, self).__init__(parents_tuple,
 												 parents2self, name=name,
 												 min_poss_depth=min_poss_depth)
@@ -441,13 +441,13 @@ def select(q_vars, k_vars, selector, name=None, compare_string=None):
 	# helpful for the user so consider maybe adding a tiny bit of mess here
 	# (including markings inside sequences and selectors so they know which
 	# index they're gathering to and from) to allow it
-	
+
 	# we're ok with getting a single q or k var, not in a tuple,
 	# but important to fix it before '+' on two UnfinishedSequences
 	# (as opposed to two tuples) sends everything sideways
 	q_vars = tupleise(q_vars)
 	k_vars = tupleise(k_vars)
-	
+
 	# attn layer is one after values it needs to be calculated
 	new_depth = _min_poss_depth(q_vars+k_vars)+1
 	res = UnfinishedSelect((_input, # need input seq length to create select
@@ -548,19 +548,19 @@ def parents2res(w, vt): return _zipmap(len(w), vt, elementwise_function)
 	# you can do it in the embedding
 	# if len(sequences_tuple)>0:
 	# min_poss_depth = max(min_poss_depth,1) # except for the very specific
-	# # case where it is the very first thing to be done, in which case we do
-	# # have to go through one layer to get to the first feedforward.
-	# # the 'if' is there to rule out increasing when doing a feedforward on
-	# # nothing, ie, when making a constant. constants are allowed to be
-	# # created on layer 0, they're part of the embedding or the weights that
-	# # will use them later or whatever, it's fine
-	
+	# # case where it is the very first thing to be done, in which case we do
+	# # have to go through one layer to get to the first feedforward.
+	# # the 'if' is there to rule out increasing when doing a feedforward on
+	# # nothing, ie, when making a constant. constants are allowed to be
+	# # created on layer 0, they're part of the embedding or the weights that
+	# # will use them later or whatever, it's fine
+
 	# at least as deep as needed MVs, but no deeper cause FF
 	# (which happens at end of layer)
 	return format_output(parents_tuple, parents2res, name,
 						 min_poss_depth=min_poss_depth,
 						 elementwise_function=elementwise_function,
-						 from_zipmap=True)
+						 from_zipmap=True)
 
 
 def aggregate(select, sequences_tuple, elementwise_function=None,
@@ -574,7 +574,7 @@ def aggregate(select, sequences_tuple, elementwise_function=None,
 	def parents2res(s, vt): return _aggregate(
 		s, vt, elementwise_function, default=default)
 	def_uses = definitely_uses_identity_function
-	
+
 	# at least as deep as needed attention and at least one deeper than needed
 	# MVs
 	return format_output(parents_tuple, parents2res, name,
@@ -583,7 +583,7 @@ def parents2res(s, vt): return _aggregate(
 						 min_poss_depth=max(_min_poss_depth(
 							 sequences_tuple)+1, select.min_poss_depth),
 						 definitely_uses_identity_function=def_uses)
-	
+

 # up to here was just plain transformer 'assembly'. any addition is a lie
 # now begin the bells and whistles
diff --git a/RASP_support/REPL.py b/RASP_support/REPL.py
index 9bff3a7..174c141 100644
--- a/RASP_support/REPL.py
+++ b/RASP_support/REPL.py
@@ -491,7 +491,7 @@ def get_input_tree(self):
 		if isinstance(newinput, Stop): # input stream ended
 			return Stop()
 		if is_comment(newinput):
-			# don't let comments get in and ruin things somehow
+			# don't let comments get in and ruin things somehow
 			newinput = ""
 		# don't replace newlines here! this is how in-function comments get
 		# broken
diff --git a/RASP_support/Sugar.py b/RASP_support/Sugar.py
index 0923e75..a297eeb 100644
--- a/RASP_support/Sugar.py
+++ b/RASP_support/Sugar.py
@@ -7,6 +7,7 @@
 # top-level rasp file we import, and nice to have draw_comp_flow added into
 # the sequences already on load
 
+
 def _apply_unary_op(self, f):
 	return zipmap(self, f)
 
@@ -70,7 +71,7 @@ def asbool(seq):
 def tplnot(seq, name=None):
 	# this one does correct conversion using asbool and then we really can just
 	# do ==False
-	res = asbool(seq) == False
+	res = asbool(seq) == False  # noqa: E712
 	return _addname(res, name, "( not " + str(seq.name) + " )")
 
diff --git a/RASP_support/Support.py b/RASP_support/Support.py
index feec2e6..6e87514 100644
--- a/RASP_support/Support.py
+++ b/RASP_support/Support.py
@@ -215,10 +215,10 @@ def prep_default(default, num_output_vars):
 	verify_default_size(default, num_output_vars)
 	if not isinstance(default, tuple):
 		# specifically with how we're going to do things here in the
-		# average aggregate, will help to actually have the outputs get
+		# average aggregate, will help to actually have the outputs get
 		# passed around as tuples, even if they're scalars really.
 		# but do this after the size check for the scalar one so it doesn't
-		# get filled with weird ifs... this tupled scalar thing is only a
+		# get filled with weird ifs... this tupled scalar thing is only a
this tupled scalar thing is only a # convenience in this implementation in this here function default = (default,) return default diff --git a/RASP_support/analyse.py b/RASP_support/analyse.py index a1f4477..62b4c0a 100644 --- a/RASP_support/analyse.py +++ b/RASP_support/analyse.py @@ -235,7 +235,7 @@ def note_if_seeker(self): return if (not self.get_parent_sequences()) \ - and (self.get_parent_select() is not None): + and (self.get_parent_select() is not None): # no parent sequences, but yes parent select: this value is a function # of only its parent select, i.e., a seeker (marks whether select found # something or not) diff --git a/RASP_support/make_operators.py b/RASP_support/make_operators.py index b1a26f7..a015308 100644 --- a/RASP_support/make_operators.py +++ b/RASP_support/make_operators.py @@ -123,7 +123,7 @@ def __rpow__(self, other): return apply_binary_op(self, other, lambda a, b: pow(b, a)) # skipping and, or, xor, which are bitwise and dont implement 'and' and - # 'or' but rather & and |. + # 'or' but rather & and |. # similarly skipping lshift, rshift cause who wants them. # wish i had not, and, or primitives, but can accept that dont. # if people really want to do 'not' they can do '==False' instead, can do a