Edit File by line
/home/barbar84/public_h.../wp-conte.../plugins/sujqvwi/ExeBy/smexe_ro.../lib64/python2....
File: cookielib.py
r"""HTTP cookie handling for web clients.
[0] Fix | Delete
[1] Fix | Delete
This module has (now fairly distant) origins in Gisle Aas' Perl module
[2] Fix | Delete
HTTP::Cookies, from the libwww-perl library.
[3] Fix | Delete
[4] Fix | Delete
Docstrings, comments and debug strings in this code refer to the
[5] Fix | Delete
attributes of the HTTP cookie system as cookie-attributes, to distinguish
[6] Fix | Delete
them clearly from Python attributes.
[7] Fix | Delete
[8] Fix | Delete
Class diagram (note that BSDDBCookieJar and the MSIE* classes are not
[9] Fix | Delete
distributed with the Python standard library, but are available from
[10] Fix | Delete
http://wwwsearch.sf.net/):
[11] Fix | Delete
[12] Fix | Delete
CookieJar____
[13] Fix | Delete
/ \ \
[14] Fix | Delete
FileCookieJar \ \
[15] Fix | Delete
/ | \ \ \
[16] Fix | Delete
MozillaCookieJar | LWPCookieJar \ \
[17] Fix | Delete
| | \
[18] Fix | Delete
| ---MSIEBase | \
[19] Fix | Delete
| / | | \
[20] Fix | Delete
| / MSIEDBCookieJar BSDDBCookieJar
[21] Fix | Delete
|/
[22] Fix | Delete
MSIECookieJar
[23] Fix | Delete
[24] Fix | Delete
"""
[25] Fix | Delete
[26] Fix | Delete
# Public API of this module.
__all__ = ['Cookie', 'CookieJar', 'CookiePolicy', 'DefaultCookiePolicy',
           'FileCookieJar', 'LWPCookieJar', 'lwp_cookie_str', 'LoadError',
           'MozillaCookieJar']
[29] Fix | Delete
[30] Fix | Delete
import re, urlparse, copy, time, urllib
[31] Fix | Delete
try:
[32] Fix | Delete
import threading as _threading
[33] Fix | Delete
except ImportError:
[34] Fix | Delete
import dummy_threading as _threading
[35] Fix | Delete
import httplib # only for the default HTTP port
[36] Fix | Delete
from calendar import timegm
[37] Fix | Delete
[38] Fix | Delete
debug = False   # set to True to enable debugging via the logging module
logger = None   # lazily created by _debug() on first use
[40] Fix | Delete
[41] Fix | Delete
def _debug(*args):
    """Forward *args* to ``logging.Logger.debug`` when the module-level
    ``debug`` flag is set; do nothing otherwise.

    The "cookielib" logger is created lazily on first use so that the
    logging module is only imported when debugging is enabled.
    """
    if not debug:
        return
    global logger
    if logger is None:
        import logging
        logger = logging.getLogger("cookielib")
    return logger.debug(*args)
[49] Fix | Delete
[50] Fix | Delete
[51] Fix | Delete
# Default port used when a cookie's domain carries no explicit port
# (httplib.HTTP_PORT, i.e. the standard HTTP port, as a string).
DEFAULT_HTTP_PORT = str(httplib.HTTP_PORT)
MISSING_FILENAME_TEXT = ("a filename was not supplied (nor was the CookieJar "
                         "instance initialised with one)")
[54] Fix | Delete
[55] Fix | Delete
def _warn_unhandled_exception():
    """Emit a warning carrying the traceback of the exception being handled.

    There are a few catch-all except: statements in this module, for
    catching input that's bad in unexpected ways; this makes those
    swallowed exceptions visible instead of silent.
    """
    import warnings, traceback, StringIO
    buf = StringIO.StringIO()
    traceback.print_exc(None, buf)
    warnings.warn("cookielib bug!\n%s" % buf.getvalue(), stacklevel=2)
[64] Fix | Delete
[65] Fix | Delete
[66] Fix | Delete
# Date/time conversion
[67] Fix | Delete
# -----------------------------------------------------------------------------
[68] Fix | Delete
[69] Fix | Delete
EPOCH_YEAR = 1970
[70] Fix | Delete
def _timegm(tt):
[71] Fix | Delete
year, month, mday, hour, min, sec = tt[:6]
[72] Fix | Delete
if ((year >= EPOCH_YEAR) and (1 <= month <= 12) and (1 <= mday <= 31) and
[73] Fix | Delete
(0 <= hour <= 24) and (0 <= min <= 59) and (0 <= sec <= 61)):
[74] Fix | Delete
return timegm(tt)
[75] Fix | Delete
else:
[76] Fix | Delete
return None
[77] Fix | Delete
[78] Fix | Delete
DAYS = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]
[79] Fix | Delete
MONTHS = ["Jan", "Feb", "Mar", "Apr", "May", "Jun",
[80] Fix | Delete
"Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]
[81] Fix | Delete
MONTHS_LOWER = []
[82] Fix | Delete
for month in MONTHS: MONTHS_LOWER.append(month.lower())
[83] Fix | Delete
[84] Fix | Delete
def time2isoz(t=None):
    """Return a string representing time in seconds since epoch, t.

    If the function is called without an argument, it will use the current
    time.

    The format of the returned string is like "YYYY-MM-DD hh:mm:ssZ",
    representing Universal Time (UTC, aka GMT).  An example of this format is:

    1994-11-24 08:49:37Z

    """
    if t is None:
        t = time.time()
    year, mon, mday, hour, minute, sec = time.gmtime(t)[:6]
    return "%04d-%02d-%02d %02d:%02d:%02dZ" % (
        year, mon, mday, hour, minute, sec)
[100] Fix | Delete
[101] Fix | Delete
def time2netscape(t=None):
    """Return a string representing time in seconds since epoch, t.

    If the function is called without an argument, it will use the current
    time.

    The format of the returned string is like this:

    Wed, DD-Mon-YYYY HH:MM:SS GMT

    """
    if t is None:
        t = time.time()
    year, mon, mday, hour, minute, sec, wday = time.gmtime(t)[:7]
    return "%s, %02d-%s-%04d %02d:%02d:%02d GMT" % (
        DAYS[wday], mday, MONTHS[mon - 1], year, hour, minute, sec)
[116] Fix | Delete
[117] Fix | Delete
[118] Fix | Delete
UTC_ZONES = {"GMT": None, "UTC": None, "UT": None, "Z": None}
[119] Fix | Delete
[120] Fix | Delete
TIMEZONE_RE = re.compile(r"^([-+])?(\d\d?):?(\d\d)?$")
[121] Fix | Delete
def offset_from_tz_string(tz):
[122] Fix | Delete
offset = None
[123] Fix | Delete
if tz in UTC_ZONES:
[124] Fix | Delete
offset = 0
[125] Fix | Delete
else:
[126] Fix | Delete
m = TIMEZONE_RE.search(tz)
[127] Fix | Delete
if m:
[128] Fix | Delete
offset = 3600 * int(m.group(2))
[129] Fix | Delete
if m.group(3):
[130] Fix | Delete
offset = offset + 60 * int(m.group(3))
[131] Fix | Delete
if m.group(1) == '-':
[132] Fix | Delete
offset = -offset
[133] Fix | Delete
return offset
[134] Fix | Delete
[135] Fix | Delete
def _str2time(day, mon, yr, hr, min, sec, tz):
    """Convert parsed date fields (strings or None) to seconds since epoch.

    *mon* may be a month name or a month number (as a string).  Missing
    clock fields default to 0.  Years below 1000 are resolved to the
    century that puts them closest to the current year.  Returns None if
    any field is unparseable, out of range, or the timezone is unknown.
    """
    # Translate a month name to its 1-based number, falling back to an
    # integer string.
    try:
        mon = MONTHS_LOWER.index(mon.lower()) + 1
    except ValueError:
        try:
            imon = int(mon)
        except ValueError:
            return None
        if not 1 <= imon <= 12:
            return None
        mon = imon

    # A missing clock means midnight.
    hr = int(hr) if hr is not None else 0
    min = int(min) if min is not None else 0
    sec = int(sec) if sec is not None else 0

    yr = int(yr)
    day = int(day)

    if yr < 1000:
        # Short (e.g. two-digit) year: pick the "obvious" century, the
        # one that makes the year closest to the current date.
        cur_yr = time.localtime(time.time())[0]
        m = cur_yr % 100
        tmp = yr
        yr = yr + cur_yr - m
        m = m - tmp
        if abs(m) > 50:
            if m > 0:
                yr = yr + 100
            else:
                yr = yr - 100

    # Convert UTC time tuple to seconds since epoch (not yet
    # timezone-adjusted).
    t = _timegm((yr, mon, day, hr, min, sec, tz))

    if t is not None:
        # Adjust using the timezone string to get absolute epoch time.
        if tz is None:
            tz = "UTC"
        offset = offset_from_tz_string(tz.upper())
        if offset is None:
            return None
        t = t - offset

    return t
[187] Fix | Delete
[188] Fix | Delete
# Strict RFC 1123 date, e.g. "Wed, 09 Feb 1994 22:23:32 GMT".  The second
# part of the pattern is now a raw string too: the original plain string
# relied on "\d" etc. surviving as literal backslashes, which is fragile
# (a deprecation/error in later Python versions).
STRICT_DATE_RE = re.compile(
    r"^[SMTWF][a-z][a-z], (\d\d) ([JFMASOND][a-z][a-z]) "
    r"(\d\d\d\d) (\d\d):(\d\d):(\d\d) GMT$")
# Optional leading weekday name (abbreviated or full), with optional
# comma and trailing whitespace; stripped before loose date parsing.
WEEKDAY_RE = re.compile(
    r"^(?:Sun|Mon|Tue|Wed|Thu|Fri|Sat)[a-z]*,?\s*", re.I)
[193] Fix | Delete
# Loose HTTP date: day/month/year with flexible separators, optional
# clock, optional timezone (numeric or alphabetic, but not "AM"/"PM"),
# and an optional parenthesised timezone name.
LOOSE_HTTP_DATE_RE = re.compile(
    r"""^
    (\d\d?)            # day
       (?:\s+|[-\/])
    (\w+)              # month
        (?:\s+|[-\/])
    (\d+)              # year
    (?:
          (?:\s+|:)    # separator before clock
       (\d\d?):(\d\d)  # hour:min
       (?::(\d\d))?    # optional seconds
    )?                 # optional clock
       \s*
    (?:
       ([-+]?\d{2,4}|(?![APap][Mm]\b)[A-Za-z]+) # timezone
       \s*
    )?
    (?:
       \(\w+\)         # ASCII representation of timezone in parens.
       \s*
    )?$""", re.X)
[214] Fix | Delete
def http2time(text):
    """Returns time in seconds since epoch of time represented by a string.

    Return value is an integer.

    None is returned if the format of str is unrecognized, the time is outside
    the representable range, or the timezone string is not recognized.  If the
    string contains no timezone, UTC is assumed.

    The timezone in the string may be numerical (like "-0800" or "+0100") or a
    string timezone (like "UTC", "GMT", "BST" or "EST").  Currently, only the
    timezone strings equivalent to UTC (zero offset) are known to the function.

    The function loosely parses the following formats:

    Wed, 09 Feb 1994 22:23:32 GMT       -- HTTP format
    Tuesday, 08-Feb-94 14:15:29 GMT     -- old rfc850 HTTP format
    Tuesday, 08-Feb-1994 14:15:29 GMT   -- broken rfc850 HTTP format
    09 Feb 1994 22:23:32 GMT            -- HTTP format (no weekday)
    08-Feb-94 14:15:29 GMT              -- rfc850 format (no weekday)
    08-Feb-1994 14:15:29 GMT            -- broken rfc850 format (no weekday)

    The parser ignores leading and trailing whitespace.  The time may be
    absent.

    If the year is given with only 2 digits, the function will select the
    century that makes the year closest to the current date.

    """
    # Fast path for strictly conforming (RFC 1123) strings.
    m = STRICT_DATE_RE.search(text)
    if m:
        g = m.groups()
        mon = MONTHS_LOWER.index(g[1].lower()) + 1
        tt = (int(g[2]), mon, int(g[0]),
              int(g[3]), int(g[4]), float(g[5]))
        return _timegm(tt)

    # Messy parsing: drop leading whitespace and any useless weekday
    # name, then try the loose regexp.
    text = WEEKDAY_RE.sub("", text.lstrip(), 1)

    m = LOOSE_HTTP_DATE_RE.search(text)
    if m is None:
        return None  # bad format

    day, mon, yr, hr, min, sec, tz = m.groups()
    return _str2time(day, mon, yr, hr, min, sec, tz)
[269] Fix | Delete
[270] Fix | Delete
# Loose ISO 8601 date, compact or separated, with optional clock and
# optional timezone.
# NOTE(review): the inner group "(:?\d\d)" in the timezone part looks
# like it was meant to be non-capturing "(?::\d\d)"; it is kept as-is
# because iso2time() unpacks the resulting extra group.
ISO_DATE_RE = re.compile(
    r"""^
    (\d{4})              # year
       [-\/]?
    (\d\d?)              # numerical month
       [-\/]?
    (\d\d?)              # day
   (?:
         (?:\s+|[-:Tt])  # separator before clock
      (\d\d?):?(\d\d)    # hour:min
      (?::?(\d\d(?:\.\d*)?))? # optional seconds (and fractional)
   )?                    # optional clock
      \s*
   (?:
      ([-+]?\d\d?:?(:?\d\d)?
       |Z|z)             # timezone  (Z is "zero meridian", i.e. GMT)
      \s*
   )?$""", re.X)
[288] Fix | Delete
def iso2time(text):
    """
    As for http2time, but parses the ISO 8601 formats:

    1994-02-03 14:15:29 -0100    -- ISO 8601 format
    1994-02-03 14:15:29          -- zone is optional
    1994-02-03                   -- only date
    1994-02-03T14:15:29          -- Use T as separator
    19940203T141529Z             -- ISO 8601 compact format
    19940203                     -- only date

    """
    m = ISO_DATE_RE.search(text.lstrip())
    if m is None:
        return None  # bad format

    # The final group is an extra fragment of the timezone captured by
    # the regexp; it is deliberately ignored here.
    yr, mon, day, hr, min, sec, tz, _ = m.groups()
    return _str2time(day, mon, yr, hr, min, sec, tz)
[316] Fix | Delete
[317] Fix | Delete
[318] Fix | Delete
# Header parsing
[319] Fix | Delete
# -----------------------------------------------------------------------------
[320] Fix | Delete
[321] Fix | Delete
def unmatched(match):
    """Return the part of match.string that lies outside the match's span."""
    start, end = match.span(0)
    s = match.string
    return s[:start] + s[end:]
[325] Fix | Delete
[326] Fix | Delete
# Leading token (attribute name): everything up to "=", whitespace,
# ";" or ",".
HEADER_TOKEN_RE =        re.compile(r"^\s*([^=\s;,]+)")
# '="..."' with backslash escapes allowed inside the quotes.
HEADER_QUOTED_VALUE_RE = re.compile(r"^\s*=\s*\"([^\"\\]*(?:\\.[^\"\\]*)*)\"")
# '=value' unquoted, up to whitespace, ";" or ",".
HEADER_VALUE_RE =        re.compile(r"^\s*=\s*([^\s;,]*)")
# A backslash escape inside a quoted value.
HEADER_ESCAPE_RE = re.compile(r"\\(.)")
[330] Fix | Delete
def split_header_words(header_values):
    r"""Parse header values into a list of lists containing key,value pairs.

    The function knows how to deal with ",", ";" and "=" as well as quoted
    values after "=".  A list of space separated tokens are parsed as if they
    were separated by ";".

    If the header_values passed as argument contains multiple values, then they
    are treated as if they were a single value separated by comma ",".

    This means that this function is useful for parsing header fields that
    follow this syntax (BNF as from the HTTP/1.1 specification, but we relax
    the requirement for tokens).

      headers           = #header
      header            = (token | parameter) *( [";"] (token | parameter))

      token             = 1*<any CHAR except CTLs or separators>
      separators        = "(" | ")" | "<" | ">" | "@"
                        | "," | ";" | ":" | "\" | <">
                        | "/" | "[" | "]" | "?" | "="
                        | "{" | "}" | SP | HT

      quoted-string     = ( <"> *(qdtext | quoted-pair ) <"> )
      qdtext            = <any TEXT except <">>
      quoted-pair       = "\" CHAR

      parameter         = attribute "=" value
      attribute         = token
      value             = token | quoted-string

    Each header is represented by a list of key/value pairs.  The value for a
    simple token (not part of a parameter) is None.  Syntactically incorrect
    headers will not necessarily be parsed as you would want.

    This is easier to describe with some examples:

    >>> split_header_words(['foo="bar"; port="80,81"; discard, bar=baz'])
    [[('foo', 'bar'), ('port', '80,81'), ('discard', None)], [('bar', 'baz')]]
    >>> split_header_words(['text/html; charset="iso-8859-1"'])
    [[('text/html', None), ('charset', 'iso-8859-1')]]
    >>> split_header_words([r'Basic realm="\"foo\bar\""'])
    [[('Basic', None), ('realm', '"foobar"')]]

    """
    assert not isinstance(header_values, basestring)
    result = []
    for text in header_values:
        orig_text = text
        pairs = []
        while text:
            m = HEADER_TOKEN_RE.search(text)
            if m:
                text = unmatched(m)
                name = m.group(1)
                m = HEADER_QUOTED_VALUE_RE.search(text)
                if m:  # quoted value
                    text = unmatched(m)
                    value = m.group(1)
                    # un-escape backslash sequences inside the quotes
                    value = HEADER_ESCAPE_RE.sub(r"\1", value)
                else:
                    m = HEADER_VALUE_RE.search(text)
                    if m:  # unquoted value
                        text = unmatched(m)
                        value = m.group(1)
                        value = value.rstrip()
                    else:
                        # no value, a lone token
                        value = None
                pairs.append((name, value))
            elif text.lstrip().startswith(","):
                # concatenated headers, as per RFC 2616 section 4.2
                text = text.lstrip()[1:]
                if pairs: result.append(pairs)
                pairs = []
            else:
                # skip junk.  Pattern is now a raw string: the original
                # non-raw "^[=\s;]*" only worked because "\s" happens to
                # survive as a literal backslash.
                non_junk, nr_junk_chars = re.subn(r"^[=\s;]*", "", text)
                assert nr_junk_chars > 0, (
                    "split_header_words bug: '%s', '%s', %s" %
                    (orig_text, text, pairs))
                text = non_junk
        if pairs: result.append(pairs)
    return result
[414] Fix | Delete
[415] Fix | Delete
# Characters that must be backslash-escaped inside a quoted value.
HEADER_JOIN_ESCAPE_RE = re.compile(r"([\"\\])")

def join_header_words(lists):
    """Do the inverse (almost) of the conversion done by split_header_words.

    Takes a list of lists of (key, value) pairs and produces a single header
    value.  Attribute values are quoted if needed.

    >>> join_header_words([[("text/plain", None), ("charset", "iso-8859/1")]])
    'text/plain; charset="iso-8859/1"'
    >>> join_header_words([[("text/plain", None)], [("charset", "iso-8859/1")]])
    'text/plain, charset="iso-8859/1"'

    """
    headers = []
    for pairs in lists:
        attrs = []
        for key, value in pairs:
            if value is None:
                # lone token, no "=value" part
                attrs.append(key)
                continue
            if re.search(r"^\w+$", value):
                # plain word: safe to emit unquoted
                attrs.append("%s=%s" % (key, value))
            else:
                # quote, escaping embedded '"' and '\'
                escaped = HEADER_JOIN_ESCAPE_RE.sub(r"\\\1", value)
                attrs.append('%s="%s"' % (key, escaped))
        if attrs:
            headers.append("; ".join(attrs))
    return ", ".join(headers)
[440] Fix | Delete
[441] Fix | Delete
def _strip_quotes(text):
[442] Fix | Delete
if text.startswith('"'):
[443] Fix | Delete
text = text[1:]
[444] Fix | Delete
if text.endswith('"'):
[445] Fix | Delete
text = text[:-1]
[446] Fix | Delete
return text
[447] Fix | Delete
[448] Fix | Delete
def parse_ns_headers(ns_headers):
[449] Fix | Delete
"""Ad-hoc parser for Netscape protocol cookie-attributes.
[450] Fix | Delete
[451] Fix | Delete
The old Netscape cookie format for Set-Cookie can for instance contain
[452] Fix | Delete
an unquoted "," in the expires field, so we have to use this ad-hoc
[453] Fix | Delete
parser instead of split_header_words.
[454] Fix | Delete
[455] Fix | Delete
XXX This may not make the best possible effort to parse all the crap
[456] Fix | Delete
that Netscape Cookie headers contain. Ronald Tschalar's HTTPClient
[457] Fix | Delete
parser is probably better, so could do worse than following that if
[458] Fix | Delete
this ever gives any trouble.
[459] Fix | Delete
[460] Fix | Delete
Currently, this is also used for parsing RFC 2109 cookies.
[461] Fix | Delete
[462] Fix | Delete
"""
[463] Fix | Delete
known_attrs = ("expires", "domain", "path", "secure",
[464] Fix | Delete
# RFC 2109 attrs (may turn up in Netscape cookies, too)
[465] Fix | Delete
"version", "port", "max-age")
[466] Fix | Delete
[467] Fix | Delete
result = []
[468] Fix | Delete
for ns_header in ns_headers:
[469] Fix | Delete
pairs = []
[470] Fix | Delete
version_set = False
[471] Fix | Delete
[472] Fix | Delete
# XXX: The following does not strictly adhere to RFCs in that empty
[473] Fix | Delete
# names and values are legal (the former will only appear once and will
[474] Fix | Delete
# be overwritten if multiple occurrences are present). This is
[475] Fix | Delete
# mostly to deal with backwards compatibility.
[476] Fix | Delete
for ii, param in enumerate(ns_header.split(';')):
[477] Fix | Delete
param = param.strip()
[478] Fix | Delete
[479] Fix | Delete
key, sep, val = param.partition('=')
[480] Fix | Delete
key = key.strip()
[481] Fix | Delete
[482] Fix | Delete
if not key:
[483] Fix | Delete
if ii == 0:
[484] Fix | Delete
break
[485] Fix | Delete
else:
[486] Fix | Delete
continue
[487] Fix | Delete
[488] Fix | Delete
# allow for a distinction between present and empty and missing
[489] Fix | Delete
# altogether
[490] Fix | Delete
val = val.strip() if sep else None
[491] Fix | Delete
[492] Fix | Delete
if ii != 0:
[493] Fix | Delete
lc = key.lower()
[494] Fix | Delete
if lc in known_attrs:
[495] Fix | Delete
key = lc
[496] Fix | Delete
[497] Fix | Delete
if key == "version":
[498] Fix | Delete
# This is an RFC 2109 cookie.
[499] Fix | Delete
It is recommended that you Edit text format, this type of Fix handles quite a lot in one request
Function