#! /usr/libexec/platform-python
# NOTE: the shebang line above is intentionally NOT "/usr/bin/env python".
# On many systems (e.g. Solaris), /usr/local/bin is not in $PATH as passed
# to CGI scripts, and /usr/local/bin is the default directory where Python
# is installed, so /usr/bin/env would be unable to find python.  Granted,
# binary installations by Linux vendors often install Python elsewhere
# (e.g. /usr/bin, or /usr/libexec/platform-python as above), so those
# vendors patch this line to match their choice of location.
"""Support module for CGI (Common Gateway Interface) scripts.
This module defines a number of utilities for use by CGI scripts
# Michael McLay started this module. Steve Majewski changed the
# interface to SvFormContentDict and FormContentDict. The multipart
# parsing was inspired by code submitted by Andreas Paepcke. Guido van
# Rossum rewrote, reformatted and documented the module and is currently
# responsible for its maintenance.
from io import StringIO, BytesIO, TextIOWrapper
from collections.abc import Mapping
import sys
import os
import urllib.parse
import http.client
import locale
from email.parser import FeedParser
from email.message import Message
from warnings import warn
__all__ = ["MiniFieldStorage", "FieldStorage",
"parse", "parse_qs", "parse_qsl", "parse_multipart",
"parse_header", "test", "print_exception", "print_environ",
"print_form", "print_directory", "print_arguments",
"print_environ_usage", "escape"]
logfile = "" # Filename to log to, if not empty
logfp = None # File object to log to, if not None
"""Write a log message, if there is a log file.
Even though this function is called initlog(), you should always
use log(); log is a variable that is set either to initlog
(initially), to dolog (once the log file has been opened), or to
nolog (when logging is disabled).
The first argument is a format string; the remaining arguments (if
    any) are arguments to the % operator, so e.g.

        log("a: %s", "b")

    will write "a: b" to the log file, followed by a newline.
If the global logfp is not None, it should be a file object to
which log data is written.
If the global logfp is None, the global logfile may be a string
giving a filename to open, in append mode. This file should be
world writable!!! If the file can't be opened, logging is
    silently disabled (since there is no safe place where we could
    send an error message).

    """
    global log, logfile, logfp
    if logfile and not logfp:
        try:
            logfp = open(logfile, "a")
        except OSError:
            pass
    if not logfp:
        log = nolog
    else:
        log = dolog
    log(*allargs)
"""Write a log message to the log file. See initlog() for docs."""
logfp.write(fmt%args + "\n")
"""Dummy function, assigned to log when logging is disabled."""
"""Close the log file."""
global log, logfile, logfp
log = initlog # The current logging function
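
# Illustrative sketch (not part of the module): how a CGI script would use
# the logging hooks above.  The log path is an assumption for the example
# and must be writable by the web-server user.
#
#     import cgi
#     cgi.logfile = "/tmp/mycgi.log"          # hypothetical path
#     cgi.log("%s request for %s", "GET", "/cgi-bin/demo")
#     # The first call goes through initlog(), which opens the file and
#     # rebinds cgi.log to dolog(); later calls append directly.
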
# Maximum input we will accept when REQUEST_METHOD is POST
# 0 ==> unlimited input
maxlen = 0
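# Illustrative usage (not part of the module): a script can cap the size of
# POST bodies accepted by parse() and parse_multipart(), e.g.
#
#     import cgi
#     cgi.maxlen = 1024 * 1024    # raise ValueError for bodies over 1 MiB
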
def parse(fp=None, environ=os.environ, keep_blank_values=0,
strict_parsing=0, separator=None):
"""Parse a query in the environment or from a file (default stdin)
fp : file pointer; default: sys.stdin.buffer
environ : environment dictionary; default: os.environ
keep_blank_values: flag indicating whether blank values in
percent-encoded forms should be treated as blank strings.
A true value indicates that blanks should be retained as
blank strings. The default false value indicates that
            blank values are to be ignored and treated as if they were
            not included.
strict_parsing: flag indicating what to do with parsing errors.
If false (the default), errors are silently ignored.
If true, errors raise a ValueError exception.
        separator: str. The symbol to use for separating the query arguments.
    """
    if fp is None:
        fp = sys.stdin
# field keys and values (except for files) are returned as strings
# an encoding is required to decode the bytes read from self.fp
    if hasattr(fp, 'encoding'):
        encoding = fp.encoding
    else:
        encoding = 'latin-1'
# fp.read() must return bytes
    if isinstance(fp, TextIOWrapper):
        fp = fp.buffer
if not 'REQUEST_METHOD' in environ:
environ['REQUEST_METHOD'] = 'GET' # For testing stand-alone
if environ['REQUEST_METHOD'] == 'POST':
ctype, pdict = parse_header(environ['CONTENT_TYPE'])
if ctype == 'multipart/form-data':
return parse_multipart(fp, pdict, separator=separator)
elif ctype == 'application/x-www-form-urlencoded':
clength = int(environ['CONTENT_LENGTH'])
if maxlen and clength > maxlen:
raise ValueError('Maximum content length exceeded')
qs = fp.read(clength).decode(encoding)
        else:
            qs = ''                     # Unknown content-type
        if 'QUERY_STRING' in environ:
            if qs:
                qs = qs + '&'
            qs = qs + environ['QUERY_STRING']
        environ['QUERY_STRING'] = qs    # XXX Shouldn't, really
    elif 'QUERY_STRING' in environ:
        qs = environ['QUERY_STRING']
    else:
        qs = ""
        environ['QUERY_STRING'] = qs    # XXX Shouldn't, really
    return urllib.parse.parse_qs(qs, keep_blank_values, strict_parsing,
                                 encoding=encoding, separator=separator)
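
# Illustrative sketch (not part of the module): calling parse() outside a web
# server with a synthetic CGI environment.  The field names and values are
# made up; separator is passed explicitly to avoid relying on the default.
#
#     import io
#     env = {"REQUEST_METHOD": "POST",
#            "CONTENT_TYPE": "application/x-www-form-urlencoded",
#            "CONTENT_LENGTH": "13"}
#     body = io.BytesIO(b"name=Ada&x=42")
#     parse(fp=body, environ=env, separator="&")
#     # expected: {'name': ['Ada'], 'x': ['42']}
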
# The query-string parsing functions below now simply delegate to
# urllib.parse; they are kept here to maintain backward compatibility.
def parse_qs(qs, keep_blank_values=0, strict_parsing=0, separator=None):
"""Parse a query given as a string argument."""
warn("cgi.parse_qs is deprecated, use urllib.parse.parse_qs instead",
return urllib.parse.parse_qs(qs, keep_blank_values, strict_parsing,
def parse_qsl(qs, keep_blank_values=0, strict_parsing=0, separator=None):
"""Parse a query given as a string argument."""
warn("cgi.parse_qsl is deprecated, use urllib.parse.parse_qsl instead",
return urllib.parse.parse_qsl(qs, keep_blank_values, strict_parsing,
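
# Illustrative note (not part of the module): new code should call
# urllib.parse directly instead of the deprecated wrappers above, e.g.
#
#     from urllib.parse import parse_qs, parse_qsl
#     parse_qs("a=1&a=2&b=3")     # {'a': ['1', '2'], 'b': ['3']}
#     parse_qsl("a=1&a=2&b=3")    # [('a', '1'), ('a', '2'), ('b', '3')]
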
def parse_multipart(fp, pdict, separator=None):
"""Parse multipart input.
pdict: dictionary containing other parameters of content-type header
Returns a dictionary just like parse_qs(): keys are the field names, each
value is a list of values for that field. This is easy to use but not
much good if you are expecting megabytes to be uploaded -- in that case,
use the FieldStorage class instead which is much more flexible. Note
that content-type is the raw, unparsed contents of the content-type
XXX This does not parse nested multipart parts -- use FieldStorage for
XXX This should really be subsumed by FieldStorage altogether -- no
point in having two implementations of the same parsing algorithm.
Also, FieldStorage protects itself better against certain DoS attacks
by limiting the size of the data read in one chunk. The API here
does not support that kind of protection. This also affects parse()
since it can call parse_multipart().
    boundary = pdict.get('boundary', b"")
    if not valid_boundary(boundary):
        raise ValueError('Invalid boundary in multipart form: %r'
                         % (boundary,))

    nextpart = b"--" + boundary
    lastpart = b"--" + boundary + b"--"
    partdict = {}
    terminator = b""

    while terminator != lastpart:
        nbytes = -1
        data = None
        if terminator:
            # At start of next part. Read headers first.
            headers = http.client.parse_headers(fp)
            clength = headers.get('content-length')
            try:
                nbytes = int(clength)
            except (TypeError, ValueError):
                pass
            if nbytes > 0:
                if maxlen and nbytes > maxlen:
                    raise ValueError('Maximum content length exceeded')
                data = fp.read(nbytes)
            else:
                data = b""
        # Read lines until end of part.
        lines = []
        while True:
            line = fp.readline()
            if not line:
                terminator = lastpart   # End outer loop
                break
            if line.startswith(b"--"):
                terminator = line.rstrip()
                if terminator in (nextpart, lastpart):
                    break
            lines.append(line)
        # Done with part: skip the preamble, otherwise join the collected
        # lines (minus the final line terminator) into the part's data.
        if data is None:
            continue
        if nbytes < 0 and lines:
            if lines[-1].endswith(b"\r\n"):
                lines[-1] = lines[-1][:-2]
            elif lines[-1].endswith(b"\n"):
                lines[-1] = lines[-1][:-1]
            data = b"".join(lines)
        key, params = parse_header(headers.get('content-disposition', ''))
        if key != 'form-data' or 'name' not in params:
            continue
        name = params['name']
        if name in partdict:
            partdict[name].append(data)
        else:
            partdict[name] = [data]

    return partdict
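
# Illustrative sketch (not part of the module): feeding parse_multipart() a
# hand-built multipart/form-data body.  The boundary and field name are made
# up for the example.
#
#     import io
#     body = (b"--BOUND\r\n"
#             b'Content-Disposition: form-data; name="title"\r\n'
#             b"\r\n"
#             b"hello\r\n"
#             b"--BOUND--\r\n")
#     parse_multipart(io.BytesIO(body), {"boundary": b"BOUND"})
#     # expected: {'title': [b'hello']}
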
def _parseparam(s):
    while s[:1] == ';':
        s = s[1:]
        end = s.find(';')
        while end > 0 and (s.count('"', 0, end) - s.count('\\"', 0, end)) % 2:
            end = s.find(';', end + 1)
        if end < 0:
            end = len(s)
        f = s[:end]
        yield f.strip()
        s = s[end:]


def parse_header(line):
    """Parse a Content-type like header.

    Return the main content-type and a dictionary of options.
    """
    parts = _parseparam(';' + line)
    key = parts.__next__()
    pdict = {}
    for p in parts:
        i = p.find('=')
        if i >= 0:
            name = p[:i].strip().lower()
            value = p[i+1:].strip()
            if len(value) >= 2 and value[0] == value[-1] == '"':
                value = value[1:-1]
                value = value.replace('\\\\', '\\').replace('\\"', '"')
            pdict[name] = value
    return key, pdict
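
# Illustrative sketch (not part of the module): parse_header() splits the
# main value from its (possibly quoted) parameters.
#
#     parse_header('text/html; charset=utf-8')
#     # expected: ('text/html', {'charset': 'utf-8'})
#     parse_header('attachment; filename="report one.pdf"')
#     # expected: ('attachment', {'filename': 'report one.pdf'})
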
# Classes for field storage
# =========================
"""Like FieldStorage, for use when no file uploads are possible."""
def __init__(self, name, value):
"""Constructor from field name and value."""
# self.file = StringIO(value)
"""Return printable representation."""
return "MiniFieldStorage(%r, %r)" % (self.name, self.value)
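
# Illustrative sketch (not part of the module): MiniFieldStorage is what
# FieldStorage uses for plain, non-file fields.
#
#     f = MiniFieldStorage("city", "Oslo")
#     f.name, f.value     # ('city', 'Oslo')
#     repr(f)             # "MiniFieldStorage('city', 'Oslo')"
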
"""Store a sequence of fields, reading multipart/form-data.
This class provides naming, typing, files stored on disk, and
more. At the top level, it is accessible like a dictionary, whose
keys are the field names. (Note: None can occur as a field name.)
The items are either a Python list (if there's multiple values) or
another FieldStorage or MiniFieldStorage object. If it's a single
object, it has the following attributes:
name: the field name, if specified; otherwise None
filename: the filename, if specified; otherwise None; this is the
client side filename, *not* the file name on which it is
stored (that's a temporary file you don't deal with)
value: the value as a *string*; for file uploads, this
transparently reads the file every time you request the value
file: the file(-like) object from which you can read the data *as
        bytes*; None if the data is stored as a simple string
type: the content-type, or None if not specified
type_options: dictionary of options specified on the content-type
disposition: content-disposition, or None if not specified
disposition_options: dictionary of corresponding options
headers: a dictionary(-like) object (sometimes email.message.Message or a
subclass thereof) containing *all* headers
The class is subclassable, mostly for the purpose of overriding
the make_file() method, which is called internally to come up with
a file open for reading and writing. This makes it possible to
override the default choice of storing all files in a temporary
    directory and unlinking them as soon as they have been opened.
    """
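
    # Illustrative sketch (not part of the module): typical use from a CGI
    # script.  The field names ("name", "upload") are made up for the example.
    #
    #     form = FieldStorage()
    #     who = form.getvalue("name", "nobody")   # simple field -> str
    #     item = form["upload"]                   # file-upload field
    #     if item.filename:
    #         data = item.file.read()             # file contents, as bytes
    #         item.file.close()
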
def __init__(self, fp=None, headers=None, outerboundary=b'',
environ=os.environ, keep_blank_values=0, strict_parsing=0,
limit=None, encoding='utf-8', errors='replace',
max_num_fields=None, separator=None):
"""Constructor. Read multipart/* until last part.
fp : file pointer; default: sys.stdin.buffer
(not used when the request method is GET)
1. a TextIOWrapper object
2. an object whose read() and readline() methods return bytes
headers : header dictionary-like object; default:
taken from environ as per CGI spec
outerboundary : terminating multipart boundary
environ : environment dictionary; default: os.environ
keep_blank_values: flag indicating whether blank values in
percent-encoded forms should be treated as blank strings.
A true value indicates that blanks should be retained as
blank strings. The default false value indicates that
            blank values are to be ignored and treated as if they were
            not included.
strict_parsing: flag indicating what to do with parsing errors.
If false (the default), errors are silently ignored.
If true, errors raise a ValueError exception.
limit : used internally to read parts of multipart/form-data forms,
to exit from the reading loop when reached. It is the difference
            between the form content-length and the number of bytes already
            read.
encoding, errors : the encoding and error handler used to decode the
binary stream to strings. Must be the same as the charset defined
            for the page sending the form (content-type : meta http-equiv or
            header)
max_num_fields: int. If set, then __init__ throws a ValueError
            if there are more than n fields read by parse_qsl().
        """
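
        # Illustrative sketch (not part of the module): parsing a request
        # outside a real CGI environment by supplying fp and environ
        # explicitly; the header values and body are made up, and separator
        # is passed explicitly rather than relying on the default.
        #
        #     import io
        #     env = {"REQUEST_METHOD": "POST",
        #            "CONTENT_TYPE": "application/x-www-form-urlencoded",
        #            "CONTENT_LENGTH": "7"}
        #     fs = FieldStorage(fp=io.BytesIO(b"a=1&b=2"), environ=env,
        #                       separator="&")
        #     fs.getvalue("a")    # expected: '1'
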
        method = 'GET'
        self.keep_blank_values = keep_blank_values
        self.strict_parsing = strict_parsing
        self.max_num_fields = max_num_fields
        self.separator = separator
        if 'REQUEST_METHOD' in environ:
            method = environ['REQUEST_METHOD'].upper()
        self.qs_on_post = None
        if method == 'GET' or method == 'HEAD':
            if 'QUERY_STRING' in environ:
                qs = environ['QUERY_STRING']
            elif sys.argv[1:]:
                qs = sys.argv[1]
            else:
                qs = ""
            qs = qs.encode(locale.getpreferredencoding(), 'surrogateescape')
            fp = BytesIO(qs)
            if headers is None:
                headers = {'content-type':
                           "application/x-www-form-urlencoded"}
        if headers is None:
            headers = {}
            if method == 'POST':
                # Set default content-type for POST to what's traditional
                headers['content-type'] = "application/x-www-form-urlencoded"
            if 'CONTENT_TYPE' in environ:
                headers['content-type'] = environ['CONTENT_TYPE']
            if 'QUERY_STRING' in environ:
                self.qs_on_post = environ['QUERY_STRING']
            if 'CONTENT_LENGTH' in environ:
                headers['content-length'] = environ['CONTENT_LENGTH']
        else:
            if not (isinstance(headers, (Mapping, Message))):
                raise TypeError("headers must be mapping or an instance of "
                                "email.message.Message")
        self.headers = headers
        if fp is None:
            self.fp = sys.stdin.buffer
        # self.fp.read() must return bytes
        elif isinstance(fp, TextIOWrapper):
            self.fp = fp.buffer
        else:
            if not (hasattr(fp, 'read') and hasattr(fp, 'readline')):
                raise TypeError("fp must be file pointer")