File: tokenize.py
                    pos += len(comment_token)

                yield TokenInfo(NL, line[pos:],
                                (lnum, pos), (lnum, len(line)), line)
                continue

            if column > indents[-1]:           # count indents or dedents
                indents.append(column)
                yield TokenInfo(INDENT, line[:pos], (lnum, 0), (lnum, pos), line)
            while column < indents[-1]:
                if column not in indents:
                    raise IndentationError(
                        "unindent does not match any outer indentation level",
                        ("<tokenize>", lnum, pos, line))
                indents = indents[:-1]

                yield TokenInfo(DEDENT, '', (lnum, pos), (lnum, pos), line)

        else:                                  # continued statement
            if not line:
                raise TokenError("EOF in multi-line statement", (lnum, 0))
            continued = 0

        while pos < max:
            pseudomatch = _compile(PseudoToken).match(line, pos)
            if pseudomatch:                                # scan for tokens
                start, end = pseudomatch.span(1)
                spos, epos, pos = (lnum, start), (lnum, end), end
                if start == end:
                    continue
                token, initial = line[start:end], line[start]

                if (initial in numchars or                 # ordinary number
                    (initial == '.' and token != '.' and token != '...')):
                    yield TokenInfo(NUMBER, token, spos, epos, line)
                elif initial in '\r\n':
                    if parenlev > 0:
                        yield TokenInfo(NL, token, spos, epos, line)
                    else:
                        yield TokenInfo(NEWLINE, token, spos, epos, line)

                elif initial == '#':
                    assert not token.endswith("\n")
                    yield TokenInfo(COMMENT, token, spos, epos, line)

                elif token in triple_quoted:
                    endprog = _compile(endpats[token])
                    endmatch = endprog.match(line, pos)
                    if endmatch:                           # all on one line
                        pos = endmatch.end(0)
                        token = line[start:pos]
                        yield TokenInfo(STRING, token, spos, (lnum, pos), line)
                    else:
                        strstart = (lnum, start)           # multiple lines
                        contstr = line[start:]
                        contline = line
                        break

                # Check up to the first 3 chars of the token to see if
                # they're in the single_quoted set. If so, they start
                # a string.
                # We're using the first 3, because we're looking for
                # "rb'" (for example) at the start of the token. If
                # we switch to longer prefixes, this needs to be
                # adjusted.
                # Note that initial == token[:1].
                # Also note that single quote checking must come after
                # triple quote checking (above).
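                # For example, a complete token such as rb'abc' reaches the
                # branch below because token[:3] == "rb'" is in single_quoted.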
                elif (initial in single_quoted or
                      token[:2] in single_quoted or
                      token[:3] in single_quoted):
                    if token[-1] == '\n':                  # continued string
                        strstart = (lnum, start)
                        # Again, using the first 3 chars of the
                        # token. This is looking for the matching end
                        # regex for the correct type of quote
                        # character. So it's really looking for
                        # endpats["'"] or endpats['"'], by trying to
                        # skip string prefix characters, if any.
                        endprog = _compile(endpats.get(initial) or
                                           endpats.get(token[1]) or
                                           endpats.get(token[2]))
                        contstr, needcont = line[start:], 1
                        contline = line
                        break
                    else:                                  # ordinary string
                        yield TokenInfo(STRING, token, spos, epos, line)

                elif initial.isidentifier():               # ordinary name
                    yield TokenInfo(NAME, token, spos, epos, line)
                elif initial == '\\':                      # continued stmt
                    continued = 1
                else:
                    if initial in '([{':
                        parenlev += 1
                    elif initial in ')]}':
                        parenlev -= 1
                    yield TokenInfo(OP, token, spos, epos, line)
            else:
                yield TokenInfo(ERRORTOKEN, line[pos],
                                (lnum, pos), (lnum, pos+1), line)
                pos += 1

    # Add an implicit NEWLINE if the input doesn't end in one
    if last_line and last_line[-1] not in '\r\n':
        yield TokenInfo(NEWLINE, '', (lnum - 1, len(last_line)), (lnum - 1, len(last_line) + 1), '')
    for indent in indents[1:]:                 # pop remaining indent levels
        yield TokenInfo(DEDENT, '', (lnum, 0), (lnum, 0), '')
    yield TokenInfo(ENDMARKER, '', (lnum, 0), (lnum, 0), '')

def generate_tokens(readline):
    """Tokenize a source reading Python code as unicode strings.

    This has the same API as tokenize(), except that it expects the *readline*
    callable to return str objects instead of bytes.
    """
    return _tokenize(readline, None)

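# Usage sketch (illustrative): any readline callable that returns str works,
# for example one obtained from io.StringIO.
#
#     import io, tokenize
#     source = io.StringIO("x = 1 + 2\n")
#     for tok in tokenize.generate_tokens(source.readline):
#         print(tok.type, tok.string)
#
# The bytes-based tokenize() used by main() below takes a readline returning
# bytes instead, so that it can detect the source encoding itself.
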
def main():
    import argparse

    # Helper error handling routines
    def perror(message):
        sys.stderr.write(message)
        sys.stderr.write('\n')

    def error(message, filename=None, location=None):
        if location:
            args = (filename,) + location + (message,)
            perror("%s:%d:%d: error: %s" % args)
        elif filename:
            perror("%s: error: %s" % (filename, message))
        else:
            perror("error: %s" % message)
        sys.exit(1)

    # Parse the arguments and options
    parser = argparse.ArgumentParser(prog='python -m tokenize')
    parser.add_argument(dest='filename', nargs='?',
                        metavar='filename.py',
                        help='the file to tokenize; defaults to stdin')
    parser.add_argument('-e', '--exact', dest='exact', action='store_true',
                        help='display token names using the exact type')
    args = parser.parse_args()

    try:
        # Tokenize the input
        if args.filename:
            filename = args.filename
            with _builtin_open(filename, 'rb') as f:
                tokens = list(tokenize(f.readline))
        else:
            filename = "<stdin>"
            tokens = _tokenize(sys.stdin.readline, None)

        # Output the tokenization
        for token in tokens:
            token_type = token.type
            if args.exact:
                token_type = token.exact_type
            token_range = "%d,%d-%d,%d:" % (token.start + token.end)
            print("%-20s%-15s%-15r" %
                  (token_range, tok_name[token_type], token.string))
    except IndentationError as err:
        line, column = err.args[1][1:3]
        error(err.args[0], filename, (line, column))
    except TokenError as err:
        line, column = err.args[1]
        error(err.args[0], filename, (line, column))
    except SyntaxError as err:
        error(err, filename)
    except OSError as err:
        error(err)
    except KeyboardInterrupt:
        print("interrupted\n")
    except Exception as err:
        perror("unexpected error: %s" % err)
        raise

if __name__ == "__main__":
    main()

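# Command-line usage sketch (illustrative), matching the argparse options
# defined in main() above:
#
#     python -m tokenize filename.py       # tokenize a file
#     python -m tokenize -e filename.py    # use exact token types (e.g. PLUS instead of OP)
#     python -m tokenize                   # tokenize standard input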