Mirror of https://github.com/microsoft/PowerToys.git, synced 2025-06-06 00:23:00 +08:00
Implement #15

commit f1ce98eb07
parent d89968cfa5
@@ -1,11 +1,9 @@
 #encoding=utf8
 from __future__ import unicode_literals
 import requests
 from bs4 import BeautifulSoup
 import json
 import webbrowser
-from wox import Wox
+from wox import Wox,WoxAPI

 class HackerNews(Wox):

@@ -16,12 +14,16 @@ class HackerNews(Wox):
         for i in bs.select(".comhead"):
             title = i.previous_sibling.text
             url = i.previous_sibling["href"]
-            results.append({"Title": title ,"IcoPath":"Images/app.ico","JsonRPCAction":{"method": "openUrl", "parameters": url}})
-            #results.append({"Title": title ,"IcoPath":"Images/app.ico","JsonRPCAction":{"method": "Wox.ChangeQuery","parameters":[url,True]}})
+            results.append({"Title": title ,"IcoPath":"Images/app.ico","JsonRPCAction":{"method": "openUrl","parameters":[url],"dontHideAfterAction":True}})
+            #results.append({"Title": title ,"IcoPath":"Images/app.ico","JsonRPCAction":{"method": "Wox.ShowApp"}})

         return results

+    def openUrl(self,url):
+        webbrowser.open(url)
+        #todo:doesn't work when move this line up
+        WoxAPI.change_query(url)

 if __name__ == "__main__":
     HackerNews()
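The JsonRPCAction entries above drive Wox's Python plugin protocol: Wox invokes the plugin process with a JSON payload, and the method name in the action is looked up on the plugin class and called with the listed parameters. A minimal sketch of that dispatch convention (the FakePlugin class and the payload are hypothetical stand-ins, not part of the commit):

import json

request = json.loads('{"method": "openUrl", "parameters": ["https://news.ycombinator.com"]}')

class FakePlugin(object):
    def openUrl(self, url):
        # a real plugin would call webbrowser.open(url) here
        print("would open %s" % url)

plugin = FakePlugin()
# same idea as the wox module's dispatch: method name -> attribute lookup -> call
getattr(plugin, request["method"])(*request["parameters"])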
@@ -1 +1,2 @@
-d:\Personal\wox.jsonrpc\Output\Debug\PythonHome\python.exe main.py "{\"jsonrpc\": \"2.0\", \"method\": \"query\", \"params\": \"l\", \"id\": 1}
+..\..\PythonHome\python.exe main.py "{\"jsonrpc\": \"2.0\", \"method\": \"query\", \"params\": \"l\", \"id\": 1}
+REM ..\..\PythonHome\python.exe main.py "{\"method\":\"openUrl\",\"parameters\":[\"https://blog.pinboard.in/2014/07/pinboard_turns_five/\",2]}"
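The hand-escaped quotes in the batch file above are error-prone to write; the same JSON-RPC query payload can be built programmatically. A sketch (assumes a python executable on PATH; the interpreter path in the batch file is not reproduced here):

import json
import subprocess

payload = json.dumps({"jsonrpc": "2.0", "method": "query", "params": "l", "id": 1})
# equivalent to the hand-escaped string passed to main.py in the batch file
print(subprocess.check_output(["python", "main.py", payload]))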
Binary file not shown (image, 19 KiB).
PythonHome/Lib/BaseHTTPServer.py (new file, 603 lines)
@@ -0,0 +1,603 @@
"""HTTP server base class.

Note: the class in this module doesn't implement any HTTP request; see
SimpleHTTPServer for simple implementations of GET, HEAD and POST
(including CGI scripts).  It does, however, optionally implement HTTP/1.1
persistent connections, as of version 0.3.

Contents:

- BaseHTTPRequestHandler: HTTP request handler base class
- test: test function

XXX To do:

- log requests even later (to capture byte count)
- log user-agent header and other interesting goodies
- send error log to separate file
"""


# See also:
#
# HTTP Working Group                                        T. Berners-Lee
# INTERNET-DRAFT                                            R. T. Fielding
# <draft-ietf-http-v10-spec-00.txt>                     H. Frystyk Nielsen
# Expires September 8, 1995                                  March 8, 1995
#
# URL: http://www.ics.uci.edu/pub/ietf/http/draft-ietf-http-v10-spec-00.txt
#
# and
#
# Network Working Group                                      R. Fielding
# Request for Comments: 2616                                       et al
# Obsoletes: 2068                                              June 1999
# Category: Standards Track
#
# URL: http://www.faqs.org/rfcs/rfc2616.html

# Log files
# ---------
#
# Here's a quote from the NCSA httpd docs about log file format.
#
# | The logfile format is as follows. Each line consists of:
# |
# | host rfc931 authuser [DD/Mon/YYYY:hh:mm:ss] "request" ddd bbbb
# |
# |        host: Either the DNS name or the IP number of the remote client
# |        rfc931: Any information returned by identd for this person,
# |                - otherwise.
# |        authuser: If user sent a userid for authentication, the user name,
# |                  - otherwise.
# |        DD: Day
# |        Mon: Month (calendar name)
# |        YYYY: Year
# |        hh: hour (24-hour format, the machine's timezone)
# |        mm: minutes
# |        ss: seconds
# |        request: The first line of the HTTP request as sent by the client.
# |        ddd: the status code returned by the server, - if not available.
# |        bbbb: the total number of bytes sent,
# |              *not including the HTTP/1.0 header*, - if not available
# |
# | You can determine the name of the file accessed through request.
#
# (Actually, the latter is only true if you know the server configuration
# at the time the request was made!)

__version__ = "0.3"

__all__ = ["HTTPServer", "BaseHTTPRequestHandler"]

import sys
import time
import socket # For gethostbyaddr()
from warnings import filterwarnings, catch_warnings
with catch_warnings():
    if sys.py3kwarning:
        filterwarnings("ignore", ".*mimetools has been removed",
                       DeprecationWarning)
    import mimetools
import SocketServer

# Default error message template
DEFAULT_ERROR_MESSAGE = """\
<head>
<title>Error response</title>
</head>
<body>
<h1>Error response</h1>
<p>Error code %(code)d.
<p>Message: %(message)s.
<p>Error code explanation: %(code)s = %(explain)s.
</body>
"""

DEFAULT_ERROR_CONTENT_TYPE = "text/html"

def _quote_html(html):
    return html.replace("&", "&amp;").replace("<", "&lt;").replace(">", "&gt;")

class HTTPServer(SocketServer.TCPServer):

    allow_reuse_address = 1    # Seems to make sense in testing environment

    def server_bind(self):
        """Override server_bind to store the server name."""
        SocketServer.TCPServer.server_bind(self)
        host, port = self.socket.getsockname()[:2]
        self.server_name = socket.getfqdn(host)
        self.server_port = port


class BaseHTTPRequestHandler(SocketServer.StreamRequestHandler):

    """HTTP request handler base class.

    The following explanation of HTTP serves to guide you through the
    code as well as to expose any misunderstandings I may have about
    HTTP (so you don't need to read the code to figure out I'm wrong
    :-).

    HTTP (HyperText Transfer Protocol) is an extensible protocol on
    top of a reliable stream transport (e.g. TCP/IP).  The protocol
    recognizes three parts to a request:

    1. One line identifying the request type and path
    2. An optional set of RFC-822-style headers
    3. An optional data part

    The headers and data are separated by a blank line.

    The first line of the request has the form

    <command> <path> <version>

    where <command> is a (case-sensitive) keyword such as GET or POST,
    <path> is a string containing path information for the request,
    and <version> should be the string "HTTP/1.0" or "HTTP/1.1".
    <path> is encoded using the URL encoding scheme (using %xx to signify
    the ASCII character with hex code xx).

    The specification specifies that lines are separated by CRLF but
    for compatibility with the widest range of clients recommends
    servers also handle LF.  Similarly, whitespace in the request line
    is treated sensibly (allowing multiple spaces between components
    and allowing trailing whitespace).

    Similarly, for output, lines ought to be separated by CRLF pairs
    but most clients grok LF characters just fine.

    If the first line of the request has the form

    <command> <path>

    (i.e. <version> is left out) then this is assumed to be an HTTP
    0.9 request; this form has no optional headers and data part and
    the reply consists of just the data.

    The reply form of the HTTP 1.x protocol again has three parts:

    1. One line giving the response code
    2. An optional set of RFC-822-style headers
    3. The data

    Again, the headers and data are separated by a blank line.

    The response code line has the form

    <version> <responsecode> <responsestring>

    where <version> is the protocol version ("HTTP/1.0" or "HTTP/1.1"),
    <responsecode> is a 3-digit response code indicating success or
    failure of the request, and <responsestring> is an optional
    human-readable string explaining what the response code means.

    This server parses the request and the headers, and then calls a
    function specific to the request type (<command>).  Specifically,
    a request SPAM will be handled by a method do_SPAM().  If no
    such method exists the server sends an error response to the
    client.  If it exists, it is called with no arguments:

    do_SPAM()

    Note that the request name is case sensitive (i.e. SPAM and spam
    are different requests).

    The various request details are stored in instance variables:

    - client_address is the client IP address in the form (host,
    port);

    - command, path and version are the broken-down request line;

    - headers is an instance of mimetools.Message (or a derived
    class) containing the header information;

    - rfile is a file object open for reading positioned at the
    start of the optional input data part;

    - wfile is a file object open for writing.

    IT IS IMPORTANT TO ADHERE TO THE PROTOCOL FOR WRITING!

    The first thing to be written must be the response line.  Then
    follow 0 or more header lines, then a blank line, and then the
    actual data (if any).  The meaning of the header lines depends on
    the command executed by the server; in most cases, when data is
    returned, there should be at least one header line of the form

    Content-type: <type>/<subtype>

    where <type> and <subtype> should be registered MIME types,
    e.g. "text/html" or "text/plain".

    """

    # The Python system version, truncated to its first component.
    sys_version = "Python/" + sys.version.split()[0]

    # The server software version.  You may want to override this.
    # The format is multiple whitespace-separated strings,
    # where each string is of the form name[/version].
    server_version = "BaseHTTP/" + __version__

    # The default request version.  This only affects responses up until
    # the point where the request line is parsed, so it mainly decides what
    # the client gets back when sending a malformed request line.
    # Most web servers default to HTTP 0.9, i.e. don't send a status line.
    default_request_version = "HTTP/0.9"

    def parse_request(self):
        """Parse a request (internal).

        The request should be stored in self.raw_requestline; the results
        are in self.command, self.path, self.request_version and
        self.headers.

        Return True for success, False for failure; on failure, an
        error is sent back.

        """
        self.command = None  # set in case of error on the first line
        self.request_version = version = self.default_request_version
        self.close_connection = 1
        requestline = self.raw_requestline
        requestline = requestline.rstrip('\r\n')
        self.requestline = requestline
        words = requestline.split()
        if len(words) == 3:
            command, path, version = words
            if version[:5] != 'HTTP/':
                self.send_error(400, "Bad request version (%r)" % version)
                return False
            try:
                base_version_number = version.split('/', 1)[1]
                version_number = base_version_number.split(".")
                # RFC 2145 section 3.1 says there can be only one "." and
                #   - major and minor numbers MUST be treated as
                #      separate integers;
                #   - HTTP/2.4 is a lower version than HTTP/2.13, which in
                #      turn is lower than HTTP/12.3;
                #   - Leading zeros MUST be ignored by recipients.
                if len(version_number) != 2:
                    raise ValueError
                version_number = int(version_number[0]), int(version_number[1])
            except (ValueError, IndexError):
                self.send_error(400, "Bad request version (%r)" % version)
                return False
            if version_number >= (1, 1) and self.protocol_version >= "HTTP/1.1":
                self.close_connection = 0
            if version_number >= (2, 0):
                self.send_error(505,
                          "Invalid HTTP Version (%s)" % base_version_number)
                return False
        elif len(words) == 2:
            command, path = words
            self.close_connection = 1
            if command != 'GET':
                self.send_error(400,
                                "Bad HTTP/0.9 request type (%r)" % command)
                return False
        elif not words:
            return False
        else:
            self.send_error(400, "Bad request syntax (%r)" % requestline)
            return False
        self.command, self.path, self.request_version = command, path, version

        # Examine the headers and look for a Connection directive
        self.headers = self.MessageClass(self.rfile, 0)

        conntype = self.headers.get('Connection', "")
        if conntype.lower() == 'close':
            self.close_connection = 1
        elif (conntype.lower() == 'keep-alive' and
              self.protocol_version >= "HTTP/1.1"):
            self.close_connection = 0
        return True

    def handle_one_request(self):
        """Handle a single HTTP request.

        You normally don't need to override this method; see the class
        __doc__ string for information on how to handle specific HTTP
        commands such as GET and POST.

        """
        try:
            self.raw_requestline = self.rfile.readline(65537)
            if len(self.raw_requestline) > 65536:
                self.requestline = ''
                self.request_version = ''
                self.command = ''
                self.send_error(414)
                return
            if not self.raw_requestline:
                self.close_connection = 1
                return
            if not self.parse_request():
                # An error code has been sent, just exit
                return
            mname = 'do_' + self.command
            if not hasattr(self, mname):
                self.send_error(501, "Unsupported method (%r)" % self.command)
                return
            method = getattr(self, mname)
            method()
            self.wfile.flush() #actually send the response if not already done.
        except socket.timeout, e:
            #a read or a write timed out.  Discard this connection
            self.log_error("Request timed out: %r", e)
            self.close_connection = 1
            return

    def handle(self):
        """Handle multiple requests if necessary."""
        self.close_connection = 1

        self.handle_one_request()
        while not self.close_connection:
            self.handle_one_request()

    def send_error(self, code, message=None):
        """Send and log an error reply.

        Arguments are the error code, and a detailed message.
        The detailed message defaults to the short entry matching the
        response code.

        This sends an error response (so it must be called before any
        output has been generated), logs the error, and finally sends
        a piece of HTML explaining the error to the user.

        """

        try:
            short, long = self.responses[code]
        except KeyError:
            short, long = '???', '???'
        if message is None:
            message = short
        explain = long
        self.log_error("code %d, message %s", code, message)
        # using _quote_html to prevent Cross Site Scripting attacks (see bug #1100201)
        content = (self.error_message_format %
                   {'code': code, 'message': _quote_html(message), 'explain': explain})
        self.send_response(code, message)
        self.send_header("Content-Type", self.error_content_type)
        self.send_header('Connection', 'close')
        self.end_headers()
        if self.command != 'HEAD' and code >= 200 and code not in (204, 304):
            self.wfile.write(content)

    error_message_format = DEFAULT_ERROR_MESSAGE
    error_content_type = DEFAULT_ERROR_CONTENT_TYPE

    def send_response(self, code, message=None):
        """Send the response header and log the response code.

        Also send two standard headers with the server software
        version and the current date.

        """
        self.log_request(code)
        if message is None:
            if code in self.responses:
                message = self.responses[code][0]
            else:
                message = ''
        if self.request_version != 'HTTP/0.9':
            self.wfile.write("%s %d %s\r\n" %
                             (self.protocol_version, code, message))
            # print (self.protocol_version, code, message)
        self.send_header('Server', self.version_string())
        self.send_header('Date', self.date_time_string())

    def send_header(self, keyword, value):
        """Send a MIME header."""
        if self.request_version != 'HTTP/0.9':
            self.wfile.write("%s: %s\r\n" % (keyword, value))

        if keyword.lower() == 'connection':
            if value.lower() == 'close':
                self.close_connection = 1
            elif value.lower() == 'keep-alive':
                self.close_connection = 0

    def end_headers(self):
        """Send the blank line ending the MIME headers."""
        if self.request_version != 'HTTP/0.9':
            self.wfile.write("\r\n")

    def log_request(self, code='-', size='-'):
        """Log an accepted request.

        This is called by send_response().

        """

        self.log_message('"%s" %s %s',
                         self.requestline, str(code), str(size))

    def log_error(self, format, *args):
        """Log an error.

        This is called when a request cannot be fulfilled.  By
        default it passes the message on to log_message().

        Arguments are the same as for log_message().

        XXX This should go to the separate error log.

        """

        self.log_message(format, *args)

    def log_message(self, format, *args):
        """Log an arbitrary message.

        This is used by all other logging functions.  Override
        it if you have specific logging wishes.

        The first argument, FORMAT, is a format string for the
        message to be logged.  If the format string contains
        any % escapes requiring parameters, they should be
        specified as subsequent arguments (it's just like
        printf!).

        The client ip address and current date/time are prefixed to every
        message.

        """

        sys.stderr.write("%s - - [%s] %s\n" %
                         (self.client_address[0],
                          self.log_date_time_string(),
                          format%args))

    def version_string(self):
        """Return the server software version string."""
        return self.server_version + ' ' + self.sys_version

    def date_time_string(self, timestamp=None):
        """Return the current date and time formatted for a message header."""
        if timestamp is None:
            timestamp = time.time()
        year, month, day, hh, mm, ss, wd, y, z = time.gmtime(timestamp)
        s = "%s, %02d %3s %4d %02d:%02d:%02d GMT" % (
                self.weekdayname[wd],
                day, self.monthname[month], year,
                hh, mm, ss)
        return s

    def log_date_time_string(self):
        """Return the current time formatted for logging."""
        now = time.time()
        year, month, day, hh, mm, ss, x, y, z = time.localtime(now)
        s = "%02d/%3s/%04d %02d:%02d:%02d" % (
                day, self.monthname[month], year, hh, mm, ss)
        return s

    weekdayname = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']

    monthname = [None,
                 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
                 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']

    def address_string(self):
        """Return the client address formatted for logging.

        This version looks up the full hostname using gethostbyaddr(),
        and tries to find a name that contains at least one dot.

        """

        host, port = self.client_address[:2]
        return socket.getfqdn(host)

    # Essentially static class variables

    # The version of the HTTP protocol we support.
    # Set this to HTTP/1.1 to enable automatic keepalive
    protocol_version = "HTTP/1.0"

    # The Message-like class used to parse headers
    MessageClass = mimetools.Message

    # Table mapping response codes to messages; entries have the
    # form {code: (shortmessage, longmessage)}.
    # See RFC 2616.
    responses = {
        100: ('Continue', 'Request received, please continue'),
        101: ('Switching Protocols',
              'Switching to new protocol; obey Upgrade header'),

        200: ('OK', 'Request fulfilled, document follows'),
        201: ('Created', 'Document created, URL follows'),
        202: ('Accepted',
              'Request accepted, processing continues off-line'),
        203: ('Non-Authoritative Information', 'Request fulfilled from cache'),
        204: ('No Content', 'Request fulfilled, nothing follows'),
        205: ('Reset Content', 'Clear input form for further input.'),
        206: ('Partial Content', 'Partial content follows.'),

        300: ('Multiple Choices',
              'Object has several resources -- see URI list'),
        301: ('Moved Permanently', 'Object moved permanently -- see URI list'),
        302: ('Found', 'Object moved temporarily -- see URI list'),
        303: ('See Other', 'Object moved -- see Method and URL list'),
        304: ('Not Modified',
              'Document has not changed since given time'),
        305: ('Use Proxy',
              'You must use proxy specified in Location to access this '
              'resource.'),
        307: ('Temporary Redirect',
              'Object moved temporarily -- see URI list'),

        400: ('Bad Request',
              'Bad request syntax or unsupported method'),
        401: ('Unauthorized',
              'No permission -- see authorization schemes'),
        402: ('Payment Required',
              'No payment -- see charging schemes'),
        403: ('Forbidden',
              'Request forbidden -- authorization will not help'),
        404: ('Not Found', 'Nothing matches the given URI'),
        405: ('Method Not Allowed',
              'Specified method is invalid for this resource.'),
        406: ('Not Acceptable', 'URI not available in preferred format.'),
        407: ('Proxy Authentication Required', 'You must authenticate with '
              'this proxy before proceeding.'),
        408: ('Request Timeout', 'Request timed out; try again later.'),
        409: ('Conflict', 'Request conflict.'),
        410: ('Gone',
              'URI no longer exists and has been permanently removed.'),
        411: ('Length Required', 'Client must specify Content-Length.'),
        412: ('Precondition Failed', 'Precondition in headers is false.'),
        413: ('Request Entity Too Large', 'Entity is too large.'),
        414: ('Request-URI Too Long', 'URI is too long.'),
        415: ('Unsupported Media Type', 'Entity body in unsupported format.'),
        416: ('Requested Range Not Satisfiable',
              'Cannot satisfy request range.'),
        417: ('Expectation Failed',
              'Expect condition could not be satisfied.'),

        500: ('Internal Server Error', 'Server got itself in trouble'),
        501: ('Not Implemented',
              'Server does not support this operation'),
        502: ('Bad Gateway', 'Invalid responses from another server/proxy.'),
        503: ('Service Unavailable',
              'The server cannot process the request due to a high load'),
        504: ('Gateway Timeout',
              'The gateway server did not receive a timely response'),
        505: ('HTTP Version Not Supported', 'Cannot fulfill request.'),
        }


def test(HandlerClass = BaseHTTPRequestHandler,
         ServerClass = HTTPServer, protocol="HTTP/1.0"):
    """Test the HTTP request handler class.

    This runs an HTTP server on port 8000 (or the first command line
    argument).

    """

    if sys.argv[1:]:
        port = int(sys.argv[1])
    else:
        port = 8000
    server_address = ('', port)

    HandlerClass.protocol_version = protocol
    httpd = ServerClass(server_address, HandlerClass)

    sa = httpd.socket.getsockname()
    print "Serving HTTP on", sa[0], "port", sa[1], "..."
    httpd.serve_forever()


if __name__ == '__main__':
    test()
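The do_SPAM() convention described in the class docstring means a concrete handler only defines methods named after the HTTP commands it supports; handle_one_request() looks up 'do_' + self.command on the instance. A minimal sketch using the module as listed (Python 2 syntax; the handler name and port are examples, not part of the commit):

import BaseHTTPServer

class HelloHandler(BaseHTTPServer.BaseHTTPRequestHandler):
    def do_GET(self):
        # response line, headers, blank line, then the body -- in that order
        body = "Hello from %s\n" % self.path
        self.send_response(200)
        self.send_header("Content-Type", "text/plain")
        self.send_header("Content-Length", str(len(body)))
        self.end_headers()
        self.wfile.write(body)

if __name__ == '__main__':
    httpd = BaseHTTPServer.HTTPServer(('', 8000), HelloHandler)
    httpd.serve_forever()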
Binary file not shown.
PythonHome/Lib/Bastion.py (new file, 180 lines)
@@ -0,0 +1,180 @@
"""Bastionification utility.

A bastion (for another object -- the 'original') is an object that has
the same methods as the original but does not give access to its
instance variables.  Bastions have a number of uses, but the most
obvious one is to provide code executing in restricted mode with a
safe interface to an object implemented in unrestricted mode.

The bastionification routine has an optional second argument which is
a filter function.  Only those methods for which the filter method
(called with the method name as argument) returns true are accessible.
The default filter method returns true unless the method name begins
with an underscore.

There are a number of possible implementations of bastions.  We use a
'lazy' approach where the bastion's __getattr__() discipline does all
the work for a particular method the first time it is used.  This is
usually fastest, especially if the user doesn't call all available
methods.  The retrieved methods are stored as instance variables of
the bastion, so the overhead is only occurred on the first use of each
method.

Detail: the bastion class has a __repr__() discipline which includes
the repr() of the original object.  This is precomputed when the
bastion is created.

"""
from warnings import warnpy3k
warnpy3k("the Bastion module has been removed in Python 3.0", stacklevel=2)
del warnpy3k

__all__ = ["BastionClass", "Bastion"]

from types import MethodType


class BastionClass:

    """Helper class used by the Bastion() function.

    You could subclass this and pass the subclass as the bastionclass
    argument to the Bastion() function, as long as the constructor has
    the same signature (a get() function and a name for the object).

    """

    def __init__(self, get, name):
        """Constructor.

        Arguments:

        get - a function that gets the attribute value (by name)
        name - a human-readable name for the original object
               (suggestion: use repr(object))

        """
        self._get_ = get
        self._name_ = name

    def __repr__(self):
        """Return a representation string.

        This includes the name passed in to the constructor, so that
        if you print the bastion during debugging, at least you have
        some idea of what it is.

        """
        return "<Bastion for %s>" % self._name_

    def __getattr__(self, name):
        """Get an as-yet undefined attribute value.

        This calls the get() function that was passed to the
        constructor.  The result is stored as an instance variable so
        that the next time the same attribute is requested,
        __getattr__() won't be invoked.

        If the get() function raises an exception, this is simply
        passed on -- exceptions are not cached.

        """
        attribute = self._get_(name)
        self.__dict__[name] = attribute
        return attribute


def Bastion(object, filter = lambda name: name[:1] != '_',
            name=None, bastionclass=BastionClass):
    """Create a bastion for an object, using an optional filter.

    See the Bastion module's documentation for background.

    Arguments:

    object - the original object
    filter - a predicate that decides whether a function name is OK;
             by default all names are OK that don't start with '_'
    name - the name of the object; default repr(object)
    bastionclass - class used to create the bastion; default BastionClass

    """

    raise RuntimeError, "This code is not secure in Python 2.2 and later"

    # Note: we define *two* ad-hoc functions here, get1 and get2.
    # Both are intended to be called in the same way: get(name).
    # It is clear that the real work (getting the attribute
    # from the object and calling the filter) is done in get1.
    # Why can't we pass get1 to the bastion?  Because the user
    # would be able to override the filter argument!  With get2,
    # overriding the default argument is no security loophole:
    # all it does is call it.
    # Also notice that we can't place the object and filter as
    # instance variables on the bastion object itself, since
    # the user has full access to all instance variables!

    def get1(name, object=object, filter=filter):
        """Internal function for Bastion().  See source comments."""
        if filter(name):
            attribute = getattr(object, name)
            if type(attribute) == MethodType:
                return attribute
        raise AttributeError, name

    def get2(name, get1=get1):
        """Internal function for Bastion().  See source comments."""
        return get1(name)

    if name is None:
        name = repr(object)
    return bastionclass(get2, name)


def _test():
    """Test the Bastion() function."""
    class Original:
        def __init__(self):
            self.sum = 0
        def add(self, n):
            self._add(n)
        def _add(self, n):
            self.sum = self.sum + n
        def total(self):
            return self.sum
    o = Original()
    b = Bastion(o)
    testcode = """if 1:
    b.add(81)
    b.add(18)
    print "b.total() =", b.total()
    try:
        print "b.sum =", b.sum,
    except:
        print "inaccessible"
    else:
        print "accessible"
    try:
        print "b._add =", b._add,
    except:
        print "inaccessible"
    else:
        print "accessible"
    try:
        print "b._get_.func_defaults =", map(type, b._get_.func_defaults),
    except:
        print "inaccessible"
    else:
        print "accessible"
    \n"""
    exec testcode
    print '='*20, "Using rexec:", '='*20
    import rexec
    r = rexec.RExec()
    m = r.add_module('__main__')
    m.b = b
    r.r_exec(testcode)


if __name__ == '__main__':
    _test()
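Bastion() itself raises RuntimeError unconditionally above (it was disabled as insecure), but the lazy __getattr__ caching in BastionClass can still be exercised directly. A sketch (the Account class and get callback are hypothetical; the callback mirrors Bastion's default underscore filter):

from Bastion import BastionClass

class Account:
    def __init__(self):
        self.balance = 0
    def deposit(self, n):
        self.balance = self.balance + n

acct = Account()

def get(name):
    # expose only public attributes, like Bastion's default filter
    if name[:1] != '_':
        return getattr(acct, name)
    raise AttributeError(name)

b = BastionClass(get, repr(acct))
b.deposit(10)        # first access runs __getattr__, then the method is cached
print(acct.balance)  # -> 10
print(b)             # -> <Bastion for <__main__.Account instance at ...>>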
Binary file not shown.
PythonHome/Lib/CGIHTTPServer.py (new file, 377 lines)
@@ -0,0 +1,377 @@
"""CGI-savvy HTTP Server.

This module builds on SimpleHTTPServer by implementing GET and POST
requests to cgi-bin scripts.

If the os.fork() function is not present (e.g. on Windows),
os.popen2() is used as a fallback, with slightly altered semantics; if
that function is not present either (e.g. on Macintosh), only Python
scripts are supported, and they are executed by the current process.

In all cases, the implementation is intentionally naive -- all
requests are executed sychronously.

SECURITY WARNING: DON'T USE THIS CODE UNLESS YOU ARE INSIDE A FIREWALL
-- it may execute arbitrary Python code or external programs.

Note that status code 200 is sent prior to execution of a CGI script, so
scripts cannot send other status codes such as 302 (redirect).
"""


__version__ = "0.4"

__all__ = ["CGIHTTPRequestHandler"]

import os
import sys
import urllib
import BaseHTTPServer
import SimpleHTTPServer
import select
import copy


class CGIHTTPRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):

    """Complete HTTP server with GET, HEAD and POST commands.

    GET and HEAD also support running CGI scripts.

    The POST command is *only* implemented for CGI scripts.

    """

    # Determine platform specifics
    have_fork = hasattr(os, 'fork')
    have_popen2 = hasattr(os, 'popen2')
    have_popen3 = hasattr(os, 'popen3')

    # Make rfile unbuffered -- we need to read one line and then pass
    # the rest to a subprocess, so we can't use buffered input.
    rbufsize = 0

    def do_POST(self):
        """Serve a POST request.

        This is only implemented for CGI scripts.

        """

        if self.is_cgi():
            self.run_cgi()
        else:
            self.send_error(501, "Can only POST to CGI scripts")

    def send_head(self):
        """Version of send_head that support CGI scripts"""
        if self.is_cgi():
            return self.run_cgi()
        else:
            return SimpleHTTPServer.SimpleHTTPRequestHandler.send_head(self)

    def is_cgi(self):
        """Test whether self.path corresponds to a CGI script.

        Returns True and updates the cgi_info attribute to the tuple
        (dir, rest) if self.path requires running a CGI script.
        Returns False otherwise.

        If any exception is raised, the caller should assume that
        self.path was rejected as invalid and act accordingly.

        The default implementation tests whether the normalized url
        path begins with one of the strings in self.cgi_directories
        (and the next character is a '/' or the end of the string).
        """
        collapsed_path = _url_collapse_path(urllib.unquote(self.path))
        dir_sep = collapsed_path.find('/', 1)
        head, tail = collapsed_path[:dir_sep], collapsed_path[dir_sep+1:]
        if head in self.cgi_directories:
            self.cgi_info = head, tail
            return True
        return False

    cgi_directories = ['/cgi-bin', '/htbin']

    def is_executable(self, path):
        """Test whether argument path is an executable file."""
        return executable(path)

    def is_python(self, path):
        """Test whether argument path is a Python script."""
        head, tail = os.path.splitext(path)
        return tail.lower() in (".py", ".pyw")

    def run_cgi(self):
        """Execute a CGI script."""
        dir, rest = self.cgi_info

        i = rest.find('/')
        while i >= 0:
            nextdir = rest[:i]
            nextrest = rest[i+1:]

            scriptdir = self.translate_path(nextdir)
            if os.path.isdir(scriptdir):
                dir, rest = nextdir, nextrest
                i = rest.find('/')
            else:
                break

        # find an explicit query string, if present.
        i = rest.rfind('?')
        if i >= 0:
            rest, query = rest[:i], rest[i+1:]
        else:
            query = ''

        # dissect the part after the directory name into a script name &
        # a possible additional path, to be stored in PATH_INFO.
        i = rest.find('/')
        if i >= 0:
            script, rest = rest[:i], rest[i:]
        else:
            script, rest = rest, ''

        scriptname = dir + '/' + script
        scriptfile = self.translate_path(scriptname)
        if not os.path.exists(scriptfile):
            self.send_error(404, "No such CGI script (%r)" % scriptname)
            return
        if not os.path.isfile(scriptfile):
            self.send_error(403, "CGI script is not a plain file (%r)" %
                            scriptname)
            return
        ispy = self.is_python(scriptname)
        if not ispy:
            if not (self.have_fork or self.have_popen2 or self.have_popen3):
                self.send_error(403, "CGI script is not a Python script (%r)" %
                                scriptname)
                return
            if not self.is_executable(scriptfile):
                self.send_error(403, "CGI script is not executable (%r)" %
                                scriptname)
                return

        # Reference: http://hoohoo.ncsa.uiuc.edu/cgi/env.html
        # XXX Much of the following could be prepared ahead of time!
        env = copy.deepcopy(os.environ)
        env['SERVER_SOFTWARE'] = self.version_string()
        env['SERVER_NAME'] = self.server.server_name
        env['GATEWAY_INTERFACE'] = 'CGI/1.1'
        env['SERVER_PROTOCOL'] = self.protocol_version
        env['SERVER_PORT'] = str(self.server.server_port)
        env['REQUEST_METHOD'] = self.command
        uqrest = urllib.unquote(rest)
        env['PATH_INFO'] = uqrest
        env['PATH_TRANSLATED'] = self.translate_path(uqrest)
        env['SCRIPT_NAME'] = scriptname
        if query:
            env['QUERY_STRING'] = query
        host = self.address_string()
        if host != self.client_address[0]:
            env['REMOTE_HOST'] = host
        env['REMOTE_ADDR'] = self.client_address[0]
        authorization = self.headers.getheader("authorization")
        if authorization:
            authorization = authorization.split()
            if len(authorization) == 2:
                import base64, binascii
                env['AUTH_TYPE'] = authorization[0]
                if authorization[0].lower() == "basic":
                    try:
                        authorization = base64.decodestring(authorization[1])
                    except binascii.Error:
                        pass
                    else:
                        authorization = authorization.split(':')
                        if len(authorization) == 2:
                            env['REMOTE_USER'] = authorization[0]
        # XXX REMOTE_IDENT
        if self.headers.typeheader is None:
            env['CONTENT_TYPE'] = self.headers.type
        else:
            env['CONTENT_TYPE'] = self.headers.typeheader
        length = self.headers.getheader('content-length')
        if length:
            env['CONTENT_LENGTH'] = length
        referer = self.headers.getheader('referer')
        if referer:
            env['HTTP_REFERER'] = referer
        accept = []
        for line in self.headers.getallmatchingheaders('accept'):
            if line[:1] in "\t\n\r ":
                accept.append(line.strip())
            else:
                accept = accept + line[7:].split(',')
        env['HTTP_ACCEPT'] = ','.join(accept)
        ua = self.headers.getheader('user-agent')
        if ua:
            env['HTTP_USER_AGENT'] = ua
        co = filter(None, self.headers.getheaders('cookie'))
        if co:
            env['HTTP_COOKIE'] = ', '.join(co)
        # XXX Other HTTP_* headers
        # Since we're setting the env in the parent, provide empty
        # values to override previously set values
        for k in ('QUERY_STRING', 'REMOTE_HOST', 'CONTENT_LENGTH',
                  'HTTP_USER_AGENT', 'HTTP_COOKIE', 'HTTP_REFERER'):
            env.setdefault(k, "")

        self.send_response(200, "Script output follows")

        decoded_query = query.replace('+', ' ')

        if self.have_fork:
            # Unix -- fork as we should
            args = [script]
            if '=' not in decoded_query:
                args.append(decoded_query)
            nobody = nobody_uid()
            self.wfile.flush() # Always flush before forking
            pid = os.fork()
            if pid != 0:
                # Parent
                pid, sts = os.waitpid(pid, 0)
                # throw away additional data [see bug #427345]
                while select.select([self.rfile], [], [], 0)[0]:
                    if not self.rfile.read(1):
                        break
                if sts:
                    self.log_error("CGI script exit status %#x", sts)
                return
            # Child
            try:
                try:
                    os.setuid(nobody)
                except os.error:
                    pass
                os.dup2(self.rfile.fileno(), 0)
                os.dup2(self.wfile.fileno(), 1)
                os.execve(scriptfile, args, env)
            except:
                self.server.handle_error(self.request, self.client_address)
                os._exit(127)

        else:
            # Non Unix - use subprocess
            import subprocess
            cmdline = [scriptfile]
            if self.is_python(scriptfile):
                interp = sys.executable
                if interp.lower().endswith("w.exe"):
                    # On Windows, use python.exe, not pythonw.exe
                    interp = interp[:-5] + interp[-4:]
                cmdline = [interp, '-u'] + cmdline
            if '=' not in query:
                cmdline.append(query)

            self.log_message("command: %s", subprocess.list2cmdline(cmdline))
            try:
                nbytes = int(length)
            except (TypeError, ValueError):
                nbytes = 0
            p = subprocess.Popen(cmdline,
                                 stdin = subprocess.PIPE,
                                 stdout = subprocess.PIPE,
                                 stderr = subprocess.PIPE,
                                 env = env
                                 )
            if self.command.lower() == "post" and nbytes > 0:
                data = self.rfile.read(nbytes)
            else:
                data = None
            # throw away additional data [see bug #427345]
            while select.select([self.rfile._sock], [], [], 0)[0]:
                if not self.rfile._sock.recv(1):
                    break
            stdout, stderr = p.communicate(data)
            self.wfile.write(stdout)
            if stderr:
                self.log_error('%s', stderr)
            p.stderr.close()
            p.stdout.close()
            status = p.returncode
            if status:
                self.log_error("CGI script exit status %#x", status)
            else:
                self.log_message("CGI script exited OK")


def _url_collapse_path(path):
    """
    Given a URL path, remove extra '/'s and '.' path elements and collapse
    any '..' references and returns a colllapsed path.

    Implements something akin to RFC-2396 5.2 step 6 to parse relative paths.
    The utility of this function is limited to is_cgi method and helps
    preventing some security attacks.

    Returns: A tuple of (head, tail) where tail is everything after the final /
    and head is everything before it.  Head will always start with a '/' and,
    if it contains anything else, never have a trailing '/'.

    Raises: IndexError if too many '..' occur within the path.

    """
    # Similar to os.path.split(os.path.normpath(path)) but specific to URL
    # path semantics rather than local operating system semantics.
    path_parts = path.split('/')
    head_parts = []
    for part in path_parts[:-1]:
        if part == '..':
            head_parts.pop() # IndexError if more '..' than prior parts
        elif part and part != '.':
            head_parts.append( part )
    if path_parts:
        tail_part = path_parts.pop()
        if tail_part:
            if tail_part == '..':
                head_parts.pop()
                tail_part = ''
            elif tail_part == '.':
                tail_part = ''
    else:
        tail_part = ''

    splitpath = ('/' + '/'.join(head_parts), tail_part)
    collapsed_path = "/".join(splitpath)

    return collapsed_path


nobody = None

def nobody_uid():
    """Internal routine to get nobody's uid"""
    global nobody
    if nobody:
        return nobody
    try:
        import pwd
    except ImportError:
        return -1
    try:
        nobody = pwd.getpwnam('nobody')[2]
    except KeyError:
        nobody = 1 + max(map(lambda x: x[2], pwd.getpwall()))
    return nobody


def executable(path):
    """Test for executable file."""
    try:
        st = os.stat(path)
    except os.error:
        return False
    return st.st_mode & 0111 != 0


def test(HandlerClass = CGIHTTPRequestHandler,
         ServerClass = BaseHTTPServer.HTTPServer):
    SimpleHTTPServer.test(HandlerClass, ServerClass)


if __name__ == '__main__':
    test()
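With the default cgi_directories above, a server built on this handler executes scripts found under ./cgi-bin relative to the working directory, sending the 200 status line before the script runs. A sketch (port, layout, and script are examples, not from the commit):

import BaseHTTPServer
import CGIHTTPServer

# ./cgi-bin/hello.py would print its own headers, a blank line, then the body:
#   print "Content-Type: text/plain"
#   print
#   print "hello from CGI"

httpd = BaseHTTPServer.HTTPServer(('', 8000),
                                  CGIHTTPServer.CGIHTTPRequestHandler)
httpd.serve_forever()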
Binary file not shown.
PythonHome/Lib/ConfigParser.py (new file, 753 lines)
@ -0,0 +1,753 @@
|
||||
"""Configuration file parser.
|
||||
|
||||
A setup file consists of sections, lead by a "[section]" header,
|
||||
and followed by "name: value" entries, with continuations and such in
|
||||
the style of RFC 822.
|
||||
|
||||
The option values can contain format strings which refer to other values in
|
||||
the same section, or values in a special [DEFAULT] section.
|
||||
|
||||
For example:
|
||||
|
||||
something: %(dir)s/whatever
|
||||
|
||||
would resolve the "%(dir)s" to the value of dir. All reference
|
||||
expansions are done late, on demand.
|
||||
|
||||
Intrinsic defaults can be specified by passing them into the
|
||||
ConfigParser constructor as a dictionary.
|
||||
|
||||
class:
|
||||
|
||||
ConfigParser -- responsible for parsing a list of
|
||||
configuration files, and managing the parsed database.
|
||||
|
||||
methods:
|
||||
|
||||
__init__(defaults=None)
|
||||
create the parser and specify a dictionary of intrinsic defaults. The
|
||||
keys must be strings, the values must be appropriate for %()s string
|
||||
interpolation. Note that `__name__' is always an intrinsic default;
|
||||
its value is the section's name.
|
||||
|
||||
sections()
|
||||
return all the configuration section names, sans DEFAULT
|
||||
|
||||
has_section(section)
|
||||
return whether the given section exists
|
||||
|
||||
has_option(section, option)
|
||||
return whether the given option exists in the given section
|
||||
|
||||
options(section)
|
||||
return list of configuration options for the named section
|
||||
|
||||
read(filenames)
|
||||
read and parse the list of named configuration files, given by
|
||||
name. A single filename is also allowed. Non-existing files
|
||||
are ignored. Return list of successfully read files.
|
||||
|
||||
readfp(fp, filename=None)
|
||||
read and parse one configuration file, given as a file object.
|
||||
The filename defaults to fp.name; it is only used in error
|
||||
messages (if fp has no `name' attribute, the string `<???>' is used).
|
||||
|
||||
get(section, option, raw=False, vars=None)
|
||||
return a string value for the named option. All % interpolations are
|
||||
expanded in the return values, based on the defaults passed into the
|
||||
constructor and the DEFAULT section. Additional substitutions may be
|
||||
provided using the `vars' argument, which must be a dictionary whose
|
||||
contents override any pre-existing defaults.
|
||||
|
||||
getint(section, options)
|
||||
like get(), but convert value to an integer
|
||||
|
||||
getfloat(section, options)
|
||||
like get(), but convert value to a float
|
||||
|
||||
getboolean(section, options)
|
||||
like get(), but convert value to a boolean (currently case
|
||||
insensitively defined as 0, false, no, off for False, and 1, true,
|
||||
yes, on for True). Returns False or True.
|
||||
|
||||
items(section, raw=False, vars=None)
|
||||
return a list of tuples with (name, value) for each option
|
||||
in the section.
|
||||
|
||||
remove_section(section)
|
||||
remove the given file section and all its options
|
||||
|
||||
remove_option(section, option)
|
||||
remove the given option from the given section
|
||||
|
||||
set(section, option, value)
|
||||
set the given option
|
||||
|
||||
write(fp)
|
||||
write the configuration state in .ini format
|
||||
"""
|
||||
|
||||
try:
|
||||
from collections import OrderedDict as _default_dict
|
||||
except ImportError:
|
||||
# fallback for setup.py which hasn't yet built _collections
|
||||
_default_dict = dict
|
||||
|
||||
import re
|
||||
|
||||
__all__ = ["NoSectionError", "DuplicateSectionError", "NoOptionError",
|
||||
"InterpolationError", "InterpolationDepthError",
|
||||
"InterpolationSyntaxError", "ParsingError",
|
||||
"MissingSectionHeaderError",
|
||||
"ConfigParser", "SafeConfigParser", "RawConfigParser",
|
||||
"DEFAULTSECT", "MAX_INTERPOLATION_DEPTH"]
|
||||
|
||||
DEFAULTSECT = "DEFAULT"
|
||||
|
||||
MAX_INTERPOLATION_DEPTH = 10
|
||||
|
||||
|
||||
|
||||
# exception classes
|
||||
class Error(Exception):
|
||||
"""Base class for ConfigParser exceptions."""
|
||||
|
||||
def _get_message(self):
|
||||
"""Getter for 'message'; needed only to override deprecation in
|
||||
BaseException."""
|
||||
return self.__message
|
||||
|
||||
def _set_message(self, value):
|
||||
"""Setter for 'message'; needed only to override deprecation in
|
||||
BaseException."""
|
||||
self.__message = value
|
||||
|
||||
# BaseException.message has been deprecated since Python 2.6. To prevent
|
||||
# DeprecationWarning from popping up over this pre-existing attribute, use
|
||||
# a new property that takes lookup precedence.
|
||||
message = property(_get_message, _set_message)
|
||||
|
||||
def __init__(self, msg=''):
|
||||
self.message = msg
|
||||
Exception.__init__(self, msg)
|
||||
|
||||
def __repr__(self):
|
||||
return self.message
|
||||
|
||||
__str__ = __repr__
|
||||
|
||||
class NoSectionError(Error):
|
||||
"""Raised when no section matches a requested option."""
|
||||
|
||||
def __init__(self, section):
|
||||
Error.__init__(self, 'No section: %r' % (section,))
|
||||
self.section = section
|
||||
self.args = (section, )
|
||||
|
||||
class DuplicateSectionError(Error):
|
||||
"""Raised when a section is multiply-created."""
|
||||
|
||||
def __init__(self, section):
|
||||
Error.__init__(self, "Section %r already exists" % section)
|
||||
self.section = section
|
||||
self.args = (section, )
|
||||
|
||||
class NoOptionError(Error):
|
||||
"""A requested option was not found."""
|
||||
|
||||
def __init__(self, option, section):
|
||||
Error.__init__(self, "No option %r in section: %r" %
|
||||
(option, section))
|
||||
self.option = option
|
||||
self.section = section
|
||||
self.args = (option, section)
|
||||
|
||||
class InterpolationError(Error):
|
||||
"""Base class for interpolation-related exceptions."""
|
||||
|
||||
def __init__(self, option, section, msg):
|
||||
Error.__init__(self, msg)
|
||||
self.option = option
|
||||
self.section = section
|
||||
self.args = (option, section, msg)
|
||||
|
||||
class InterpolationMissingOptionError(InterpolationError):
|
||||
"""A string substitution required a setting which was not available."""
|
||||
|
||||
def __init__(self, option, section, rawval, reference):
|
||||
msg = ("Bad value substitution:\n"
|
||||
"\tsection: [%s]\n"
|
||||
"\toption : %s\n"
|
||||
"\tkey : %s\n"
|
||||
"\trawval : %s\n"
|
||||
% (section, option, reference, rawval))
|
||||
InterpolationError.__init__(self, option, section, msg)
|
||||
self.reference = reference
|
||||
self.args = (option, section, rawval, reference)
|
||||
|
||||
class InterpolationSyntaxError(InterpolationError):
|
||||
"""Raised when the source text into which substitutions are made
|
||||
does not conform to the required syntax."""
|
||||
|
||||
class InterpolationDepthError(InterpolationError):
|
||||
"""Raised when substitutions are nested too deeply."""
|
||||
|
||||
def __init__(self, option, section, rawval):
|
||||
msg = ("Value interpolation too deeply recursive:\n"
|
||||
"\tsection: [%s]\n"
|
||||
"\toption : %s\n"
|
||||
"\trawval : %s\n"
|
||||
% (section, option, rawval))
|
||||
InterpolationError.__init__(self, option, section, msg)
|
||||
self.args = (option, section, rawval)
|
||||
|
||||
class ParsingError(Error):
|
||||
"""Raised when a configuration file does not follow legal syntax."""
|
||||
|
||||
def __init__(self, filename):
|
||||
Error.__init__(self, 'File contains parsing errors: %s' % filename)
|
||||
self.filename = filename
|
||||
self.errors = []
|
||||
self.args = (filename, )
|
||||
|
||||
def append(self, lineno, line):
|
||||
self.errors.append((lineno, line))
|
||||
self.message += '\n\t[line %2d]: %s' % (lineno, line)
|
||||
|
||||
class MissingSectionHeaderError(ParsingError):
|
||||
"""Raised when a key-value pair is found before any section header."""
|
||||
|
||||
def __init__(self, filename, lineno, line):
|
||||
Error.__init__(
|
||||
self,
|
||||
'File contains no section headers.\nfile: %s, line: %d\n%r' %
|
||||
(filename, lineno, line))
|
||||
self.filename = filename
|
||||
self.lineno = lineno
|
||||
self.line = line
|
||||
self.args = (filename, lineno, line)
|
||||
|
||||
|
||||
class RawConfigParser:
|
||||
def __init__(self, defaults=None, dict_type=_default_dict,
|
||||
allow_no_value=False):
|
||||
self._dict = dict_type
|
||||
self._sections = self._dict()
|
||||
self._defaults = self._dict()
|
||||
if allow_no_value:
|
||||
self._optcre = self.OPTCRE_NV
|
||||
else:
|
||||
self._optcre = self.OPTCRE
|
||||
if defaults:
|
||||
for key, value in defaults.items():
|
||||
self._defaults[self.optionxform(key)] = value
|
||||
|
||||
def defaults(self):
|
||||
return self._defaults
|
||||
|
||||
def sections(self):
|
||||
"""Return a list of section names, excluding [DEFAULT]"""
|
||||
# self._sections will never have [DEFAULT] in it
|
||||
return self._sections.keys()
|
||||
|
||||
def add_section(self, section):
|
||||
"""Create a new section in the configuration.
|
||||
|
||||
Raise DuplicateSectionError if a section by the specified name
|
||||
already exists. Raise ValueError if name is DEFAULT or any of it's
|
||||
case-insensitive variants.
|
||||
"""
|
||||
if section.lower() == "default":
|
||||
raise ValueError, 'Invalid section name: %s' % section
|
||||
|
||||
if section in self._sections:
|
||||
            raise DuplicateSectionError(section)
        self._sections[section] = self._dict()

    def has_section(self, section):
        """Indicate whether the named section is present in the configuration.

        The DEFAULT section is not acknowledged.
        """
        return section in self._sections

    def options(self, section):
        """Return a list of option names for the given section name."""
        try:
            opts = self._sections[section].copy()
        except KeyError:
            raise NoSectionError(section)
        opts.update(self._defaults)
        if '__name__' in opts:
            del opts['__name__']
        return opts.keys()

    def read(self, filenames):
        """Read and parse a filename or a list of filenames.

        Files that cannot be opened are silently ignored; this is
        designed so that you can specify a list of potential
        configuration file locations (e.g. current directory, user's
        home directory, systemwide directory), and all existing
        configuration files in the list will be read.  A single
        filename may also be given.

        Return list of successfully read files.
        """
        if isinstance(filenames, basestring):
            filenames = [filenames]
        read_ok = []
        for filename in filenames:
            try:
                fp = open(filename)
            except IOError:
                continue
            self._read(fp, filename)
            fp.close()
            read_ok.append(filename)
        return read_ok

    def readfp(self, fp, filename=None):
        """Like read() but the argument must be a file-like object.

        The `fp' argument must have a `readline' method.  Optional
        second argument is the `filename', which if not given, is
        taken from fp.name.  If fp has no `name' attribute, `<???>' is
        used.

        """
        if filename is None:
            try:
                filename = fp.name
            except AttributeError:
                filename = '<???>'
        self._read(fp, filename)

    def get(self, section, option):
        opt = self.optionxform(option)
        if section not in self._sections:
            if section != DEFAULTSECT:
                raise NoSectionError(section)
            if opt in self._defaults:
                return self._defaults[opt]
            else:
                raise NoOptionError(option, section)
        elif opt in self._sections[section]:
            return self._sections[section][opt]
        elif opt in self._defaults:
            return self._defaults[opt]
        else:
            raise NoOptionError(option, section)

    def items(self, section):
        try:
            d2 = self._sections[section]
        except KeyError:
            if section != DEFAULTSECT:
                raise NoSectionError(section)
            d2 = self._dict()
        d = self._defaults.copy()
        d.update(d2)
        if "__name__" in d:
            del d["__name__"]
        return d.items()

    def _get(self, section, conv, option):
        return conv(self.get(section, option))

    def getint(self, section, option):
        return self._get(section, int, option)

    def getfloat(self, section, option):
        return self._get(section, float, option)

    _boolean_states = {'1': True, 'yes': True, 'true': True, 'on': True,
                       '0': False, 'no': False, 'false': False, 'off': False}

    def getboolean(self, section, option):
        v = self.get(section, option)
        if v.lower() not in self._boolean_states:
            raise ValueError, 'Not a boolean: %s' % v
        return self._boolean_states[v.lower()]

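    # Illustrative sketch (not part of the original module): given a parser
    # `cp` holding a hypothetical section "wox" with `debug = yes`,
    # getboolean() maps the string through _boolean_states case-insensitively:
    #
    #   cp.getboolean("wox", "debug")     # -> True
    #   cp.getboolean("wox", "missing")   # raises NoOptionError
    #   # a value such as "maybe" raises ValueError: Not a boolean: maybe
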
    def optionxform(self, optionstr):
        return optionstr.lower()

    def has_option(self, section, option):
        """Check for the existence of a given option in a given section."""
        if not section or section == DEFAULTSECT:
            option = self.optionxform(option)
            return option in self._defaults
        elif section not in self._sections:
            return False
        else:
            option = self.optionxform(option)
            return (option in self._sections[section]
                    or option in self._defaults)

    def set(self, section, option, value=None):
        """Set an option."""
        if not section or section == DEFAULTSECT:
            sectdict = self._defaults
        else:
            try:
                sectdict = self._sections[section]
            except KeyError:
                raise NoSectionError(section)
        sectdict[self.optionxform(option)] = value

    def write(self, fp):
        """Write an .ini-format representation of the configuration state."""
        if self._defaults:
            fp.write("[%s]\n" % DEFAULTSECT)
            for (key, value) in self._defaults.items():
                fp.write("%s = %s\n" % (key, str(value).replace('\n', '\n\t')))
            fp.write("\n")
        for section in self._sections:
            fp.write("[%s]\n" % section)
            for (key, value) in self._sections[section].items():
                if key == "__name__":
                    continue
                if (value is not None) or (self._optcre == self.OPTCRE):
                    key = " = ".join((key, str(value).replace('\n', '\n\t')))
                fp.write("%s\n" % (key))
            fp.write("\n")

    def remove_option(self, section, option):
        """Remove an option."""
        if not section or section == DEFAULTSECT:
            sectdict = self._defaults
        else:
            try:
                sectdict = self._sections[section]
            except KeyError:
                raise NoSectionError(section)
        option = self.optionxform(option)
        existed = option in sectdict
        if existed:
            del sectdict[option]
        return existed

    def remove_section(self, section):
        """Remove a file section."""
        existed = section in self._sections
        if existed:
            del self._sections[section]
        return existed

    #
    # Regular expressions for parsing section headers and options.
    #
    SECTCRE = re.compile(
        r'\['                                 # [
        r'(?P<header>[^]]+)'                  # very permissive!
        r'\]'                                 # ]
        )
    OPTCRE = re.compile(
        r'(?P<option>[^:=\s][^:=]*)'          # very permissive!
        r'\s*(?P<vi>[:=])\s*'                 # any number of space/tab,
                                              # followed by separator
                                              # (either : or =), followed
                                              # by any # space/tab
        r'(?P<value>.*)$'                     # everything up to eol
        )
    OPTCRE_NV = re.compile(
        r'(?P<option>[^:=\s][^:=]*)'          # very permissive!
        r'\s*(?:'                             # any number of space/tab,
        r'(?P<vi>[:=])\s*'                    # optionally followed by
                                              # separator (either : or
                                              # =), followed by any #
                                              # space/tab
        r'(?P<value>.*))?$'                   # everything up to eol
        )

    def _read(self, fp, fpname):
        """Parse a sectioned setup file.

        The sections in a setup file contain a title line at the top,
        indicated by a name in square brackets (`[]'), plus key/value
        options lines, indicated by `name: value' format lines.
        Continuations are represented by an embedded newline then
        leading whitespace.  Blank lines, lines beginning with a '#',
        and just about everything else are ignored.
        """
        cursect = None                        # None, or a dictionary
        optname = None
        lineno = 0
        e = None                              # None, or an exception
        while True:
            line = fp.readline()
            if not line:
                break
            lineno = lineno + 1
            # comment or blank line?
            if line.strip() == '' or line[0] in '#;':
                continue
            if line.split(None, 1)[0].lower() == 'rem' and line[0] in "rR":
                # no leading whitespace
                continue
            # continuation line?
            if line[0].isspace() and cursect is not None and optname:
                value = line.strip()
                if value:
                    cursect[optname].append(value)
            # a section header or option header?
            else:
                # is it a section header?
                mo = self.SECTCRE.match(line)
                if mo:
                    sectname = mo.group('header')
                    if sectname in self._sections:
                        cursect = self._sections[sectname]
                    elif sectname == DEFAULTSECT:
                        cursect = self._defaults
                    else:
                        cursect = self._dict()
                        cursect['__name__'] = sectname
                        self._sections[sectname] = cursect
                    # So sections can't start with a continuation line
                    optname = None
                # no section header in the file?
                elif cursect is None:
                    raise MissingSectionHeaderError(fpname, lineno, line)
                # an option line?
                else:
                    mo = self._optcre.match(line)
                    if mo:
                        optname, vi, optval = mo.group('option', 'vi', 'value')
                        optname = self.optionxform(optname.rstrip())
                        # This check is fine because the OPTCRE cannot
                        # match if it would set optval to None
                        if optval is not None:
                            if vi in ('=', ':') and ';' in optval:
                                # ';' is a comment delimiter only if it follows
                                # a spacing character
                                pos = optval.find(';')
                                if pos != -1 and optval[pos-1].isspace():
                                    optval = optval[:pos]
                            optval = optval.strip()
                            # allow empty values
                            if optval == '""':
                                optval = ''
                            cursect[optname] = [optval]
                        else:
                            # valueless option handling
                            cursect[optname] = optval
                    else:
                        # a non-fatal parsing error occurred.  set up the
                        # exception but keep going. the exception will be
                        # raised at the end of the file and will contain a
                        # list of all bogus lines
                        if not e:
                            e = ParsingError(fpname)
                        e.append(lineno, repr(line))
        # if any parsing errors occurred, raise an exception
        if e:
            raise e

        # join the multi-line values collected while reading
        all_sections = [self._defaults]
        all_sections.extend(self._sections.values())
        for options in all_sections:
            for name, val in options.items():
                if isinstance(val, list):
                    options[name] = '\n'.join(val)

import UserDict as _UserDict

class _Chainmap(_UserDict.DictMixin):
    """Combine multiple mappings for successive lookups.

    For example, to emulate Python's normal lookup sequence:

        import __builtin__
        pylookup = _Chainmap(locals(), globals(), vars(__builtin__))
    """

    def __init__(self, *maps):
        self._maps = maps

    def __getitem__(self, key):
        for mapping in self._maps:
            try:
                return mapping[key]
            except KeyError:
                pass
        raise KeyError(key)

    def keys(self):
        result = []
        seen = set()
        for mapping in self._maps:
            for key in mapping:
                if key not in seen:
                    result.append(key)
                    seen.add(key)
        return result

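# A minimal sketch (not in the original source) of _Chainmap's lookup order,
# which ConfigParser.get() relies on below: earlier maps shadow later ones.
#
#   cm = _Chainmap({'a': '1'}, {'a': '2', 'b': '3'})
#   cm['a']   # -> '1'  (first mapping wins)
#   cm['b']   # -> '3'  (falls through to the second mapping)
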
class ConfigParser(RawConfigParser):

    def get(self, section, option, raw=False, vars=None):
        """Get an option value for a given section.

        If `vars' is provided, it must be a dictionary. The option is looked up
        in `vars' (if provided), `section', and in `defaults' in that order.

        All % interpolations are expanded in the return values, unless the
        optional argument `raw' is true.  Values for interpolation keys are
        looked up in the same manner as the option.

        The section DEFAULT is special.
        """
        sectiondict = {}
        try:
            sectiondict = self._sections[section]
        except KeyError:
            if section != DEFAULTSECT:
                raise NoSectionError(section)
        # Update with the entry specific variables
        vardict = {}
        if vars:
            for key, value in vars.items():
                vardict[self.optionxform(key)] = value
        d = _Chainmap(vardict, sectiondict, self._defaults)
        option = self.optionxform(option)
        try:
            value = d[option]
        except KeyError:
            raise NoOptionError(option, section)

        if raw or value is None:
            return value
        else:
            return self._interpolate(section, option, value, d)

    def items(self, section, raw=False, vars=None):
        """Return a list of tuples with (name, value) for each option
        in the section.

        All % interpolations are expanded in the return values, based on the
        defaults passed into the constructor, unless the optional argument
        `raw' is true.  Additional substitutions may be provided using the
        `vars' argument, which must be a dictionary whose contents overrides
        any pre-existing defaults.

        The section DEFAULT is special.
        """
        d = self._defaults.copy()
        try:
            d.update(self._sections[section])
        except KeyError:
            if section != DEFAULTSECT:
                raise NoSectionError(section)
        # Update with the entry specific variables
        if vars:
            for key, value in vars.items():
                d[self.optionxform(key)] = value
        options = d.keys()
        if "__name__" in options:
            options.remove("__name__")
        if raw:
            return [(option, d[option])
                    for option in options]
        else:
            return [(option, self._interpolate(section, option, d[option], d))
                    for option in options]

    def _interpolate(self, section, option, rawval, vars):
        # do the string interpolation
        value = rawval
        depth = MAX_INTERPOLATION_DEPTH
        while depth:                    # Loop through this until it's done
            depth -= 1
            if value and "%(" in value:
                value = self._KEYCRE.sub(self._interpolation_replace, value)
                try:
                    value = value % vars
                except KeyError, e:
                    raise InterpolationMissingOptionError(
                        option, section, rawval, e.args[0])
            else:
                break
        if value and "%(" in value:
            raise InterpolationDepthError(option, section, rawval)
        return value

    _KEYCRE = re.compile(r"%\(([^)]*)\)s|.")

    def _interpolation_replace(self, match):
        s = match.group(1)
        if s is None:
            return match.group()
        else:
            return "%%(%s)s" % self.optionxform(s)


class SafeConfigParser(ConfigParser):

    def _interpolate(self, section, option, rawval, vars):
        # do the string interpolation
        L = []
        self._interpolate_some(option, L, rawval, section, vars, 1)
        return ''.join(L)

    _interpvar_re = re.compile(r"%\(([^)]+)\)s")

    def _interpolate_some(self, option, accum, rest, section, map, depth):
        if depth > MAX_INTERPOLATION_DEPTH:
            raise InterpolationDepthError(option, section, rest)
        while rest:
            p = rest.find("%")
            if p < 0:
                accum.append(rest)
                return
            if p > 0:
                accum.append(rest[:p])
                rest = rest[p:]
            # p is no longer used
            c = rest[1:2]
            if c == "%":
                accum.append("%")
                rest = rest[2:]
            elif c == "(":
                m = self._interpvar_re.match(rest)
                if m is None:
                    raise InterpolationSyntaxError(option, section,
                        "bad interpolation variable reference %r" % rest)
                var = self.optionxform(m.group(1))
                rest = rest[m.end():]
                try:
                    v = map[var]
                except KeyError:
                    raise InterpolationMissingOptionError(
                        option, section, rest, var)
                if "%" in v:
                    self._interpolate_some(option, accum, v,
                                           section, map, depth + 1)
                else:
                    accum.append(v)
            else:
                raise InterpolationSyntaxError(
                    option, section,
                    "'%%' must be followed by '%%' or '(', found: %r" % (rest,))

    def set(self, section, option, value=None):
        """Set an option.  Extend ConfigParser.set: check for string values."""
        # The only legal non-string value if we allow valueless
        # options is None, so we need to check if the value is a
        # string if:
        # - we do not allow valueless options, or
        # - we allow valueless options but the value is not None
        if self._optcre is self.OPTCRE or value:
            if not isinstance(value, basestring):
                raise TypeError("option values must be strings")
        if value is not None:
            # check for bad percent signs:
            # first, replace all "good" interpolations
            tmp_value = value.replace('%%', '')
            tmp_value = self._interpvar_re.sub('', tmp_value)
            # then, check if there's a lone percent sign left
            if '%' in tmp_value:
                raise ValueError("invalid interpolation syntax in %r at "
                                 "position %d" % (value, tmp_value.find('%')))
        ConfigParser.set(self, section, option, value)
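A minimal usage sketch of the interpolation machinery above (illustrative only, not part of this commit; the section and option names are invented):

from StringIO import StringIO
from ConfigParser import SafeConfigParser

cp = SafeConfigParser()
cp.readfp(StringIO("[paths]\nroot = /opt/app\nlogs = %(root)s/logs\n"))
print cp.get("paths", "logs")             # -> /opt/app/logs
print cp.get("paths", "logs", raw=True)   # -> %(root)s/logs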
Binary file not shown.
758
PythonHome/Lib/Cookie.py
Normal file
@@ -0,0 +1,758 @@
####
# Copyright 2000 by Timothy O'Malley <timo@alum.mit.edu>
#
#                All Rights Reserved
#
# Permission to use, copy, modify, and distribute this software
# and its documentation for any purpose and without fee is hereby
# granted, provided that the above copyright notice appear in all
# copies and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Timothy O'Malley  not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# Timothy O'Malley DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS
# SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS, IN NO EVENT SHALL Timothy O'Malley BE LIABLE FOR
# ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
# PERFORMANCE OF THIS SOFTWARE.
#
####
#
# Id: Cookie.py,v 2.29 2000/08/23 05:28:49 timo Exp
#   by Timothy O'Malley <timo@alum.mit.edu>
#
#  Cookie.py is a Python module for the handling of HTTP
#  cookies as a Python dictionary.  See RFC 2109 for more
#  information on cookies.
#
#  The original idea to treat Cookies as a dictionary came from
#  Dave Mitchell (davem@magnet.com) in 1995, when he released the
#  first version of nscookie.py.
#
####

r"""
Here's a sample session to show how to use this module.
At the moment, this is the only documentation.

The Basics
----------

Importing is easy..

   >>> import Cookie

Most of the time you start by creating a cookie.  Cookies come in
three flavors, each with slightly different encoding semantics, but
more on that later.

   >>> C = Cookie.SimpleCookie()
   >>> C = Cookie.SerialCookie()
   >>> C = Cookie.SmartCookie()

[Note: Long-time users of Cookie.py will remember using
Cookie.Cookie() to create a Cookie object.  Although deprecated, it
is still supported by the code.  See the Backward Compatibility notes
for more information.]

Once you've created your Cookie, you can add values just as if it were
a dictionary.

   >>> C = Cookie.SmartCookie()
   >>> C["fig"] = "newton"
   >>> C["sugar"] = "wafer"
   >>> C.output()
   'Set-Cookie: fig=newton\r\nSet-Cookie: sugar=wafer'

Notice that the printable representation of a Cookie is the
appropriate format for a Set-Cookie: header.  This is the
default behavior.  You can change the header and printed
attributes by using the .output() function

   >>> C = Cookie.SmartCookie()
   >>> C["rocky"] = "road"
   >>> C["rocky"]["path"] = "/cookie"
   >>> print C.output(header="Cookie:")
   Cookie: rocky=road; Path=/cookie
   >>> print C.output(attrs=[], header="Cookie:")
   Cookie: rocky=road

The load() method of a Cookie extracts cookies from a string.  In a
CGI script, you would use this method to extract the cookies from the
HTTP_COOKIE environment variable.

   >>> C = Cookie.SmartCookie()
   >>> C.load("chips=ahoy; vienna=finger")
   >>> C.output()
   'Set-Cookie: chips=ahoy\r\nSet-Cookie: vienna=finger'

The load() method is darn-tootin smart about identifying cookies
within a string.  Escaped quotation marks, nested semicolons, and other
such trickeries do not confuse it.

   >>> C = Cookie.SmartCookie()
   >>> C.load('keebler="E=everybody; L=\\"Loves\\"; fudge=\\012;";')
   >>> print C
   Set-Cookie: keebler="E=everybody; L=\"Loves\"; fudge=\012;"

Each element of the Cookie also supports all of the RFC 2109
Cookie attributes.  Here's an example which sets the Path
attribute.

   >>> C = Cookie.SmartCookie()
   >>> C["oreo"] = "doublestuff"
   >>> C["oreo"]["path"] = "/"
   >>> print C
   Set-Cookie: oreo=doublestuff; Path=/

Each dictionary element has a 'value' attribute, which gives you
back the value associated with the key.

   >>> C = Cookie.SmartCookie()
   >>> C["twix"] = "none for you"
   >>> C["twix"].value
   'none for you'


A Bit More Advanced
-------------------

As mentioned before, there are three different flavors of Cookie
objects, each with different encoding/decoding semantics.  This
section briefly discusses the differences.

SimpleCookie

The SimpleCookie expects that all values should be standard strings.
Just to be sure, SimpleCookie invokes the str() builtin to convert
the value to a string, when the values are set dictionary-style.

   >>> C = Cookie.SimpleCookie()
   >>> C["number"] = 7
   >>> C["string"] = "seven"
   >>> C["number"].value
   '7'
   >>> C["string"].value
   'seven'
   >>> C.output()
   'Set-Cookie: number=7\r\nSet-Cookie: string=seven'


SerialCookie

The SerialCookie expects that all values should be serialized using
cPickle (or pickle, if cPickle isn't available).  As a result of
serializing, SerialCookie can save almost any Python object to a
value, and recover the exact same object when the cookie has been
returned.  (SerialCookie can yield some strange-looking cookie
values, however.)

   >>> C = Cookie.SerialCookie()
   >>> C["number"] = 7
   >>> C["string"] = "seven"
   >>> C["number"].value
   7
   >>> C["string"].value
   'seven'
   >>> C.output()
   'Set-Cookie: number="I7\\012."\r\nSet-Cookie: string="S\'seven\'\\012p1\\012."'

Be warned, however, if SerialCookie cannot de-serialize a value (because
it isn't a valid pickle'd object), IT WILL RAISE AN EXCEPTION.


SmartCookie

The SmartCookie combines aspects of each of the other two flavors.
When setting a value in a dictionary-fashion, the SmartCookie will
serialize (ala cPickle) the value *if and only if* it isn't a
Python string.  String objects are *not* serialized.  Similarly,
when the load() method parses out values, it attempts to de-serialize
the value.  If it fails, then it falls back to treating the value
as a string.

   >>> C = Cookie.SmartCookie()
   >>> C["number"] = 7
   >>> C["string"] = "seven"
   >>> C["number"].value
   7
   >>> C["string"].value
   'seven'
   >>> C.output()
   'Set-Cookie: number="I7\\012."\r\nSet-Cookie: string=seven'


Backwards Compatibility
-----------------------

In order to keep compatibility with earlier versions of Cookie.py,
it is still possible to use Cookie.Cookie() to create a Cookie.  In
fact, this simply returns a SmartCookie.

   >>> C = Cookie.Cookie()
   >>> print C.__class__.__name__
   SmartCookie


Finis.
"""  #"
#     ^
#     |----helps out font-lock

#
# Import our required modules
#
import string

try:
    from cPickle import dumps, loads
except ImportError:
    from pickle import dumps, loads

import re, warnings

__all__ = ["CookieError","BaseCookie","SimpleCookie","SerialCookie",
           "SmartCookie","Cookie"]

_nulljoin = ''.join
_semispacejoin = '; '.join
_spacejoin = ' '.join

#
# Define an exception visible to External modules
#
class CookieError(Exception):
    pass


# These quoting routines conform to the RFC2109 specification, which in
# turn references the character definitions from RFC2068.  They provide
# a two-way quoting algorithm.  Any non-text character is translated
# into a 4 character sequence: a forward-slash followed by the
# three-digit octal equivalent of the character.  Any '\' or '"' is
# quoted with a preceding '\' slash.
#
# These are taken from RFC2068 and RFC2109.
#       _LegalChars       is the list of chars which don't require "'s
#       _Translator       hash-table for fast quoting
#
_LegalChars = string.ascii_letters + string.digits + "!#$%&'*+-.^_`|~"
_Translator = {
    '\000' : '\\000',  '\001' : '\\001',  '\002' : '\\002',
    '\003' : '\\003',  '\004' : '\\004',  '\005' : '\\005',
    '\006' : '\\006',  '\007' : '\\007',  '\010' : '\\010',
    '\011' : '\\011',  '\012' : '\\012',  '\013' : '\\013',
    '\014' : '\\014',  '\015' : '\\015',  '\016' : '\\016',
    '\017' : '\\017',  '\020' : '\\020',  '\021' : '\\021',
    '\022' : '\\022',  '\023' : '\\023',  '\024' : '\\024',
    '\025' : '\\025',  '\026' : '\\026',  '\027' : '\\027',
    '\030' : '\\030',  '\031' : '\\031',  '\032' : '\\032',
    '\033' : '\\033',  '\034' : '\\034',  '\035' : '\\035',
    '\036' : '\\036',  '\037' : '\\037',

    # Because of the way browsers really handle cookies (as opposed
    # to what the RFC says) we also encode , and ;

    ',' : '\\054', ';' : '\\073',

    '"' : '\\"',       '\\' : '\\\\',

    '\177' : '\\177',  '\200' : '\\200',  '\201' : '\\201',
    '\202' : '\\202',  '\203' : '\\203',  '\204' : '\\204',
    '\205' : '\\205',  '\206' : '\\206',  '\207' : '\\207',
    '\210' : '\\210',  '\211' : '\\211',  '\212' : '\\212',
    '\213' : '\\213',  '\214' : '\\214',  '\215' : '\\215',
    '\216' : '\\216',  '\217' : '\\217',  '\220' : '\\220',
    '\221' : '\\221',  '\222' : '\\222',  '\223' : '\\223',
    '\224' : '\\224',  '\225' : '\\225',  '\226' : '\\226',
    '\227' : '\\227',  '\230' : '\\230',  '\231' : '\\231',
    '\232' : '\\232',  '\233' : '\\233',  '\234' : '\\234',
    '\235' : '\\235',  '\236' : '\\236',  '\237' : '\\237',
    '\240' : '\\240',  '\241' : '\\241',  '\242' : '\\242',
    '\243' : '\\243',  '\244' : '\\244',  '\245' : '\\245',
    '\246' : '\\246',  '\247' : '\\247',  '\250' : '\\250',
    '\251' : '\\251',  '\252' : '\\252',  '\253' : '\\253',
    '\254' : '\\254',  '\255' : '\\255',  '\256' : '\\256',
    '\257' : '\\257',  '\260' : '\\260',  '\261' : '\\261',
    '\262' : '\\262',  '\263' : '\\263',  '\264' : '\\264',
    '\265' : '\\265',  '\266' : '\\266',  '\267' : '\\267',
    '\270' : '\\270',  '\271' : '\\271',  '\272' : '\\272',
    '\273' : '\\273',  '\274' : '\\274',  '\275' : '\\275',
    '\276' : '\\276',  '\277' : '\\277',  '\300' : '\\300',
    '\301' : '\\301',  '\302' : '\\302',  '\303' : '\\303',
    '\304' : '\\304',  '\305' : '\\305',  '\306' : '\\306',
    '\307' : '\\307',  '\310' : '\\310',  '\311' : '\\311',
    '\312' : '\\312',  '\313' : '\\313',  '\314' : '\\314',
    '\315' : '\\315',  '\316' : '\\316',  '\317' : '\\317',
    '\320' : '\\320',  '\321' : '\\321',  '\322' : '\\322',
    '\323' : '\\323',  '\324' : '\\324',  '\325' : '\\325',
    '\326' : '\\326',  '\327' : '\\327',  '\330' : '\\330',
    '\331' : '\\331',  '\332' : '\\332',  '\333' : '\\333',
    '\334' : '\\334',  '\335' : '\\335',  '\336' : '\\336',
    '\337' : '\\337',  '\340' : '\\340',  '\341' : '\\341',
    '\342' : '\\342',  '\343' : '\\343',  '\344' : '\\344',
    '\345' : '\\345',  '\346' : '\\346',  '\347' : '\\347',
    '\350' : '\\350',  '\351' : '\\351',  '\352' : '\\352',
    '\353' : '\\353',  '\354' : '\\354',  '\355' : '\\355',
    '\356' : '\\356',  '\357' : '\\357',  '\360' : '\\360',
    '\361' : '\\361',  '\362' : '\\362',  '\363' : '\\363',
    '\364' : '\\364',  '\365' : '\\365',  '\366' : '\\366',
    '\367' : '\\367',  '\370' : '\\370',  '\371' : '\\371',
    '\372' : '\\372',  '\373' : '\\373',  '\374' : '\\374',
    '\375' : '\\375',  '\376' : '\\376',  '\377' : '\\377'
    }

_idmap = ''.join(chr(x) for x in xrange(256))

def _quote(str, LegalChars=_LegalChars,
           idmap=_idmap, translate=string.translate):
    #
    # If the string does not need to be double-quoted,
    # then just return the string.  Otherwise, surround
    # the string in doublequotes and precede quote (with a \)
    # special characters.
    #
    if "" == translate(str, idmap, LegalChars):
        return str
    else:
        return '"' + _nulljoin( map(_Translator.get, str, str) ) + '"'
# end _quote


_OctalPatt = re.compile(r"\\[0-3][0-7][0-7]")
_QuotePatt = re.compile(r"[\\].")

def _unquote(str):
    # If there aren't any doublequotes,
    # then there can't be any special characters.  See RFC 2109.
    if len(str) < 2:
        return str
    if str[0] != '"' or str[-1] != '"':
        return str

    # We have to assume that we must decode this string.
    # Down to work.

    # Remove the "s
    str = str[1:-1]

    # Check for special sequences.  Examples:
    #    \012 --> \n
    #    \"   --> "
    #
    i = 0
    n = len(str)
    res = []
    while 0 <= i < n:
        Omatch = _OctalPatt.search(str, i)
        Qmatch = _QuotePatt.search(str, i)
        if not Omatch and not Qmatch:              # Neither matched
            res.append(str[i:])
            break
        # else:
        j = k = -1
        if Omatch: j = Omatch.start(0)
        if Qmatch: k = Qmatch.start(0)
        if Qmatch and ( not Omatch or k < j ):     # QuotePatt matched
            res.append(str[i:k])
            res.append(str[k+1])
            i = k+2
        else:                                      # OctalPatt matched
            res.append(str[i:j])
            res.append( chr( int(str[j+1:j+4], 8) ) )
            i = j+4
    return _nulljoin(res)
# end _unquote

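# Illustrative round trip (not in the original file) for _quote()/_unquote():
#
#   _quote('fig')             -> 'fig'            (only legal chars, unchanged)
#   _quote('fig newton')      -> '"fig newton"'   (the space forces quoting)
#   _unquote('"fig newton"')  -> 'fig newton'
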
# The _getdate() routine is used to set the expiration time in
# the cookie's HTTP header.      By default, _getdate() returns the
# current time in the appropriate "expires" format for a
# Set-Cookie header.     The one optional argument is an offset from
# now, in seconds.      For example, an offset of -3600 means "one hour ago".
# The offset may be a floating point number.
#

_weekdayname = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']

_monthname = [None,
              'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
              'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']

def _getdate(future=0, weekdayname=_weekdayname, monthname=_monthname):
    from time import gmtime, time
    now = time()
    year, month, day, hh, mm, ss, wd, y, z = gmtime(now + future)
    return "%s, %02d %3s %4d %02d:%02d:%02d GMT" % \
           (weekdayname[wd], day, monthname[month], year, hh, mm, ss)


#
# A class to hold ONE key,value pair.
# In a cookie, each such pair may have several attributes.
#   so this class is used to keep the attributes associated
#   with the appropriate key,value pair.
# This class also includes a coded_value attribute, which
#   is used to hold the network representation of the
#   value.  This is most useful when Python objects are
#   pickled for network transit.
#

class Morsel(dict):
    # RFC 2109 lists these attributes as reserved:
    #   path       comment         domain
    #   max-age    secure      version
    #
    # For historical reasons, these attributes are also reserved:
    #   expires
    #
    # This is an extension from Microsoft:
    #   httponly
    #
    # This dictionary provides a mapping from the lowercase
    # variant on the left to the appropriate traditional
    # formatting on the right.
    _reserved = { "expires" : "expires",
                  "path"    : "Path",
                  "comment" : "Comment",
                  "domain"  : "Domain",
                  "max-age" : "Max-Age",
                  "secure"  : "secure",
                  "httponly": "httponly",
                  "version" : "Version",
                  }

    def __init__(self):
        # Set defaults
        self.key = self.value = self.coded_value = None

        # Set default attributes
        for K in self._reserved:
            dict.__setitem__(self, K, "")
    # end __init__

    def __setitem__(self, K, V):
        K = K.lower()
        if not K in self._reserved:
            raise CookieError("Invalid Attribute %s" % K)
        dict.__setitem__(self, K, V)
    # end __setitem__

    def isReservedKey(self, K):
        return K.lower() in self._reserved
    # end isReservedKey

    def set(self, key, val, coded_val,
            LegalChars=_LegalChars,
            idmap=_idmap, translate=string.translate):
        # First we verify that the key isn't a reserved word
        # Second we make sure it only contains legal characters
        if key.lower() in self._reserved:
            raise CookieError("Attempt to set a reserved key: %s" % key)
        if "" != translate(key, idmap, LegalChars):
            raise CookieError("Illegal key value: %s" % key)

        # It's a good key, so save it.
        self.key = key
        self.value = val
        self.coded_value = coded_val
    # end set

    def output(self, attrs=None, header = "Set-Cookie:"):
        return "%s %s" % ( header, self.OutputString(attrs) )

    __str__ = output

    def __repr__(self):
        return '<%s: %s=%s>' % (self.__class__.__name__,
                                self.key, repr(self.value) )

    def js_output(self, attrs=None):
        # Print javascript
        return """
        <script type="text/javascript">
        <!-- begin hiding
        document.cookie = \"%s\";
        // end hiding -->
        </script>
        """ % ( self.OutputString(attrs).replace('"',r'\"'), )
    # end js_output()

    def OutputString(self, attrs=None):
        # Build up our result
        #
        result = []
        RA = result.append

        # First, the key=value pair
        RA("%s=%s" % (self.key, self.coded_value))

        # Now add any defined attributes
        if attrs is None:
            attrs = self._reserved
        items = self.items()
        items.sort()
        for K,V in items:
            if V == "": continue
            if K not in attrs: continue
            if K == "expires" and type(V) == type(1):
                RA("%s=%s" % (self._reserved[K], _getdate(V)))
            elif K == "max-age" and type(V) == type(1):
                RA("%s=%d" % (self._reserved[K], V))
            elif K == "secure":
                RA(str(self._reserved[K]))
            elif K == "httponly":
                RA(str(self._reserved[K]))
            else:
                RA("%s=%s" % (self._reserved[K], V))

        # Return the result
        return _semispacejoin(result)
    # end OutputString
# end Morsel class


#
# Pattern for finding cookie
#
# This used to be strict parsing based on the RFC2109 and RFC2068
# specifications.  I have since discovered that MSIE 3.0x doesn't
# follow the character rules outlined in those specs.  As a
# result, the parsing rules here are less strict.
#

_LegalCharsPatt  = r"[\w\d!#%&'~_`><@,:/\$\*\+\-\.\^\|\)\(\?\}\{\=]"
_CookiePattern = re.compile(
    r"(?x)"                       # This is a Verbose pattern
    r"(?P<key>"                   # Start of group 'key'
    ""+ _LegalCharsPatt +"+?"     # Any word of at least one letter, nongreedy
    r")"                          # End of group 'key'
    r"\s*=\s*"                    # Equal Sign
    r"(?P<val>"                   # Start of group 'val'
    r'"(?:[^\\"]|\\.)*"'            # Any doublequoted string
    r"|"                            # or
    r"\w{3},\s[\s\w\d-]{9,11}\s[\d:]{8}\sGMT" # Special case for "expires" attr
    r"|"                            # or
    ""+ _LegalCharsPatt +"*"        # Any word or empty string
    r")"                          # End of group 'val'
    r"\s*;?"                      # Probably ending in a semi-colon
    )


# At long last, here is the cookie class.
#   Using this class is almost just like using a dictionary.
# See this module's docstring for example usage.
#
class BaseCookie(dict):
    # A container class for a set of Morsels
    #

    def value_decode(self, val):
        """real_value, coded_value = value_decode(STRING)
        Called prior to setting a cookie's value from the network
        representation.  The VALUE is the value read from HTTP
        header.
        Override this function to modify the behavior of cookies.
        """
        return val, val
    # end value_encode

    def value_encode(self, val):
        """real_value, coded_value = value_encode(VALUE)
        Called prior to setting a cookie's value from the dictionary
        representation.  The VALUE is the value being assigned.
        Override this function to modify the behavior of cookies.
        """
        strval = str(val)
        return strval, strval
    # end value_encode

    def __init__(self, input=None):
        if input: self.load(input)
    # end __init__

    def __set(self, key, real_value, coded_value):
        """Private method for setting a cookie's value"""
        M = self.get(key, Morsel())
        M.set(key, real_value, coded_value)
        dict.__setitem__(self, key, M)
    # end __set

    def __setitem__(self, key, value):
        """Dictionary style assignment."""
        rval, cval = self.value_encode(value)
        self.__set(key, rval, cval)
    # end __setitem__

    def output(self, attrs=None, header="Set-Cookie:", sep="\015\012"):
        """Return a string suitable for HTTP."""
        result = []
        items = self.items()
        items.sort()
        for K,V in items:
            result.append( V.output(attrs, header) )
        return sep.join(result)
    # end output

    __str__ = output

    def __repr__(self):
        L = []
        items = self.items()
        items.sort()
        for K,V in items:
            L.append( '%s=%s' % (K,repr(V.value) ) )
        return '<%s: %s>' % (self.__class__.__name__, _spacejoin(L))

    def js_output(self, attrs=None):
        """Return a string suitable for JavaScript."""
        result = []
        items = self.items()
        items.sort()
        for K,V in items:
            result.append( V.js_output(attrs) )
        return _nulljoin(result)
    # end js_output

    def load(self, rawdata):
        """Load cookies from a string (presumably HTTP_COOKIE) or
        from a dictionary.  Loading cookies from a dictionary 'd'
        is equivalent to calling:
            map(Cookie.__setitem__, d.keys(), d.values())
        """
        if type(rawdata) == type(""):
            self.__ParseString(rawdata)
        else:
            # self.update() wouldn't call our custom __setitem__
            for k, v in rawdata.items():
                self[k] = v
        return
    # end load()

    def __ParseString(self, str, patt=_CookiePattern):
        i = 0            # Our starting point
        n = len(str)     # Length of string
        M = None         # current morsel

        while 0 <= i < n:
            # Start looking for a cookie
            match = patt.search(str, i)
            if not match: break          # No more cookies

            K,V = match.group("key"), match.group("val")
            i = match.end(0)

            # Parse the key, value in case it's metainfo
            if K[0] == "$":
                # We ignore attributes which pertain to the cookie
                # mechanism as a whole.  See RFC 2109.
                # (Does anyone care?)
                if M:
                    M[ K[1:] ] = V
            elif K.lower() in Morsel._reserved:
                if M:
                    M[ K ] = _unquote(V)
            else:
                rval, cval = self.value_decode(V)
                self.__set(K, rval, cval)
                M = self[K]
    # end __ParseString
# end BaseCookie class

class SimpleCookie(BaseCookie):
    """SimpleCookie
    SimpleCookie supports strings as cookie values.  When setting
    the value using the dictionary assignment notation, SimpleCookie
    calls the builtin str() to convert the value to a string.  Values
    received from HTTP are kept as strings.
    """
    def value_decode(self, val):
        return _unquote( val ), val
    def value_encode(self, val):
        strval = str(val)
        return strval, _quote( strval )
# end SimpleCookie

class SerialCookie(BaseCookie):
    """SerialCookie
    SerialCookie supports arbitrary objects as cookie values. All
    values are serialized (using cPickle) before being sent to the
    client.  All incoming values are assumed to be valid Pickle
    representations.  IF AN INCOMING VALUE IS NOT IN A VALID PICKLE
    FORMAT, THEN AN EXCEPTION WILL BE RAISED.

    Note: Large cookie values add overhead because they must be
    retransmitted on every HTTP transaction.

    Note: HTTP has a 2k limit on the size of a cookie.  This class
    does not check for this limit, so be careful!!!
    """
    def __init__(self, input=None):
        warnings.warn("SerialCookie class is insecure; do not use it",
                      DeprecationWarning)
        BaseCookie.__init__(self, input)
    # end __init__
    def value_decode(self, val):
        # This could raise an exception!
        return loads( _unquote(val) ), val
    def value_encode(self, val):
        return val, _quote( dumps(val) )
# end SerialCookie

class SmartCookie(BaseCookie):
    """SmartCookie
    SmartCookie supports arbitrary objects as cookie values.  If the
    object is a string, then it is quoted.  If the object is not a
    string, however, then SmartCookie will use cPickle to serialize
    the object into a string representation.

    Note: Large cookie values add overhead because they must be
    retransmitted on every HTTP transaction.

    Note: HTTP has a 2k limit on the size of a cookie.  This class
    does not check for this limit, so be careful!!!
    """
    def __init__(self, input=None):
        warnings.warn("Cookie/SmartCookie class is insecure; do not use it",
                      DeprecationWarning)
        BaseCookie.__init__(self, input)
    # end __init__
    def value_decode(self, val):
        strval = _unquote(val)
        try:
            return loads(strval), val
        except:
            return strval, val
    def value_encode(self, val):
        if type(val) == type(""):
            return val, _quote(val)
        else:
            return val, _quote( dumps(val) )
# end SmartCookie


###########################################################
# Backwards Compatibility:  Don't break any existing code!

# We provide Cookie() as an alias for SmartCookie()
Cookie = SmartCookie

#
###########################################################

def _test():
    import doctest, Cookie
    return doctest.testmod(Cookie)

if __name__ == "__main__":
    _test()


#Local Variables:
#tab-width: 4
#end:
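A short, hypothetical usage sketch (not part of the commit) tying the classes above together with SimpleCookie:

import Cookie

c = Cookie.SimpleCookie()
c.load("session=abc123; theme=dark")   # parse an incoming Cookie: header
c["session"]["path"] = "/"             # set a Morsel attribute
print c["theme"].value                 # -> dark
print c.output()                       # Set-Cookie: headers, one per morsel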
Binary file not shown.
279
PythonHome/Lib/DocXMLRPCServer.py
Normal file
@@ -0,0 +1,279 @@
"""Self documenting XML-RPC Server.

This module can be used to create XML-RPC servers that
serve pydoc-style documentation in response to HTTP
GET requests. This documentation is dynamically generated
based on the functions and methods registered with the
server.

This module is built upon the pydoc and SimpleXMLRPCServer
modules.
"""

import pydoc
import inspect
import re
import sys

from SimpleXMLRPCServer import (SimpleXMLRPCServer,
            SimpleXMLRPCRequestHandler,
            CGIXMLRPCRequestHandler,
            resolve_dotted_attribute)

class ServerHTMLDoc(pydoc.HTMLDoc):
    """Class used to generate pydoc HTML document for a server"""

    def markup(self, text, escape=None, funcs={}, classes={}, methods={}):
        """Mark up some plain text, given a context of symbols to look for.
        Each context dictionary maps object names to anchor names."""
        escape = escape or self.escape
        results = []
        here = 0

        # XXX Note that this regular expression does not allow for the
        # hyperlinking of arbitrary strings being used as method
        # names. Only methods with names consisting of word characters
        # and '.'s are hyperlinked.
        pattern = re.compile(r'\b((http|ftp)://\S+[\w/]|'
                                r'RFC[- ]?(\d+)|'
                                r'PEP[- ]?(\d+)|'
                                r'(self\.)?((?:\w|\.)+))\b')
        while 1:
            match = pattern.search(text, here)
            if not match: break
            start, end = match.span()
            results.append(escape(text[here:start]))

            all, scheme, rfc, pep, selfdot, name = match.groups()
            if scheme:
                url = escape(all).replace('"', '&quot;')
                results.append('<a href="%s">%s</a>' % (url, url))
            elif rfc:
                url = 'http://www.rfc-editor.org/rfc/rfc%d.txt' % int(rfc)
                results.append('<a href="%s">%s</a>' % (url, escape(all)))
            elif pep:
                url = 'http://www.python.org/dev/peps/pep-%04d/' % int(pep)
                results.append('<a href="%s">%s</a>' % (url, escape(all)))
            elif text[end:end+1] == '(':
                results.append(self.namelink(name, methods, funcs, classes))
            elif selfdot:
                results.append('self.<strong>%s</strong>' % name)
            else:
                results.append(self.namelink(name, classes))
            here = end
        results.append(escape(text[here:]))
        return ''.join(results)

    def docroutine(self, object, name, mod=None,
                   funcs={}, classes={}, methods={}, cl=None):
        """Produce HTML documentation for a function or method object."""

        anchor = (cl and cl.__name__ or '') + '-' + name
        note = ''

        title = '<a name="%s"><strong>%s</strong></a>' % (
            self.escape(anchor), self.escape(name))

        if inspect.ismethod(object):
            args, varargs, varkw, defaults = inspect.getargspec(object.im_func)
            # exclude the argument bound to the instance, it will be
            # confusing to the non-Python user
            argspec = inspect.formatargspec (
                    args[1:],
                    varargs,
                    varkw,
                    defaults,
                    formatvalue=self.formatvalue
                )
        elif inspect.isfunction(object):
            args, varargs, varkw, defaults = inspect.getargspec(object)
            argspec = inspect.formatargspec(
                args, varargs, varkw, defaults, formatvalue=self.formatvalue)
        else:
            argspec = '(...)'

        if isinstance(object, tuple):
            argspec = object[0] or argspec
            docstring = object[1] or ""
        else:
            docstring = pydoc.getdoc(object)

        decl = title + argspec + (note and self.grey(
               '<font face="helvetica, arial">%s</font>' % note))

        doc = self.markup(
            docstring, self.preformat, funcs, classes, methods)
        doc = doc and '<dd><tt>%s</tt></dd>' % doc
        return '<dl><dt>%s</dt>%s</dl>\n' % (decl, doc)

    def docserver(self, server_name, package_documentation, methods):
        """Produce HTML documentation for an XML-RPC server."""

        fdict = {}
        for key, value in methods.items():
            fdict[key] = '#-' + key
            fdict[value] = fdict[key]

        server_name = self.escape(server_name)
        head = '<big><big><strong>%s</strong></big></big>' % server_name
        result = self.heading(head, '#ffffff', '#7799ee')

        doc = self.markup(package_documentation, self.preformat, fdict)
        doc = doc and '<tt>%s</tt>' % doc
        result = result + '<p>%s</p>\n' % doc

        contents = []
        method_items = sorted(methods.items())
        for key, value in method_items:
            contents.append(self.docroutine(value, key, funcs=fdict))
        result = result + self.bigsection(
            'Methods', '#ffffff', '#eeaa77', pydoc.join(contents))

        return result

class XMLRPCDocGenerator:
    """Generates documentation for an XML-RPC server.

    This class is designed as mix-in and should not
    be constructed directly.
    """

    def __init__(self):
        # setup variables used for HTML documentation
        self.server_name = 'XML-RPC Server Documentation'
        self.server_documentation = \
            "This server exports the following methods through the XML-RPC "\
            "protocol."
        self.server_title = 'XML-RPC Server Documentation'

    def set_server_title(self, server_title):
        """Set the HTML title of the generated server documentation"""

        self.server_title = server_title

    def set_server_name(self, server_name):
        """Set the name of the generated HTML server documentation"""

        self.server_name = server_name

    def set_server_documentation(self, server_documentation):
        """Set the documentation string for the entire server."""

        self.server_documentation = server_documentation

    def generate_html_documentation(self):
        """generate_html_documentation() => html documentation for the server

        Generates HTML documentation for the server using introspection for
        installed functions and instances that do not implement the
        _dispatch method. Alternatively, instances can choose to implement
        the _get_method_argstring(method_name) method to provide the
        argument string used in the documentation and the
        _methodHelp(method_name) method to provide the help text used
        in the documentation."""

        methods = {}

        for method_name in self.system_listMethods():
            if method_name in self.funcs:
                method = self.funcs[method_name]
            elif self.instance is not None:
                method_info = [None, None] # argspec, documentation
                if hasattr(self.instance, '_get_method_argstring'):
                    method_info[0] = self.instance._get_method_argstring(method_name)
                if hasattr(self.instance, '_methodHelp'):
                    method_info[1] = self.instance._methodHelp(method_name)

                method_info = tuple(method_info)
                if method_info != (None, None):
                    method = method_info
                elif not hasattr(self.instance, '_dispatch'):
                    try:
                        method = resolve_dotted_attribute(
                                    self.instance,
                                    method_name
                                    )
                    except AttributeError:
                        method = method_info
                else:
                    method = method_info
            else:
                assert 0, "Could not find method in self.functions and no "\
                          "instance installed"

            methods[method_name] = method

        documenter = ServerHTMLDoc()
        documentation = documenter.docserver(
                                self.server_name,
                                self.server_documentation,
                                methods
                            )

        return documenter.page(self.server_title, documentation)

class DocXMLRPCRequestHandler(SimpleXMLRPCRequestHandler):
    """XML-RPC and documentation request handler class.

    Handles all HTTP POST requests and attempts to decode them as
    XML-RPC requests.

    Handles all HTTP GET requests and interprets them as requests
    for documentation.
    """

    def do_GET(self):
        """Handles the HTTP GET request.

        Interpret all HTTP GET requests as requests for server
        documentation.
        """
        # Check that the path is legal
        if not self.is_rpc_path_valid():
            self.report_404()
            return

        response = self.server.generate_html_documentation()
        self.send_response(200)
        self.send_header("Content-type", "text/html")
        self.send_header("Content-length", str(len(response)))
        self.end_headers()
        self.wfile.write(response)

class DocXMLRPCServer(  SimpleXMLRPCServer,
                        XMLRPCDocGenerator):
    """XML-RPC and HTML documentation server.

    Adds the ability to serve server documentation to the capabilities
    of SimpleXMLRPCServer.
    """

    def __init__(self, addr, requestHandler=DocXMLRPCRequestHandler,
                 logRequests=1, allow_none=False, encoding=None,
                 bind_and_activate=True):
        SimpleXMLRPCServer.__init__(self, addr, requestHandler, logRequests,
                                    allow_none, encoding, bind_and_activate)
        XMLRPCDocGenerator.__init__(self)

class DocCGIXMLRPCRequestHandler(   CGIXMLRPCRequestHandler,
                                    XMLRPCDocGenerator):
    """Handler for XML-RPC data and documentation requests passed through
    CGI"""

    def handle_get(self):
        """Handles the HTTP GET request.

        Interpret all HTTP GET requests as requests for server
        documentation.
        """

        response = self.generate_html_documentation()

        print 'Content-Type: text/html'
        print 'Content-Length: %d' % len(response)
        print
        sys.stdout.write(response)

    def __init__(self):
        CGIXMLRPCRequestHandler.__init__(self)
        XMLRPCDocGenerator.__init__(self)
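A minimal sketch (not part of the commit) of serving self-documenting XML-RPC with this module; the address and the add() function are invented:

from DocXMLRPCServer import DocXMLRPCServer

def add(a, b):
    """Return the sum of two integers."""
    return a + b

server = DocXMLRPCServer(("localhost", 8000), logRequests=0)
server.register_function(add)             # inherited from SimpleXMLRPCServer
server.register_introspection_functions()
# GET / returns pydoc-style HTML; POST requests are dispatched as XML-RPC.
server.serve_forever()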
Binary file not shown.
475
PythonHome/Lib/HTMLParser.py
Normal file
@@ -0,0 +1,475 @@
"""A parser for HTML and XHTML."""

# This file is based on sgmllib.py, but the API is slightly different.

# XXX There should be a way to distinguish between PCDATA (parsed
# character data -- the normal case), RCDATA (replaceable character
# data -- only char and entity references and end tags are special)
# and CDATA (character data -- only end tags are special).


import markupbase
import re

# Regular expressions used for parsing

interesting_normal = re.compile('[&<]')
incomplete = re.compile('&[a-zA-Z#]')

entityref = re.compile('&([a-zA-Z][-.a-zA-Z0-9]*)[^a-zA-Z0-9]')
charref = re.compile('&#(?:[0-9]+|[xX][0-9a-fA-F]+)[^0-9a-fA-F]')

starttagopen = re.compile('<[a-zA-Z]')
piclose = re.compile('>')
commentclose = re.compile(r'--\s*>')

# see http://www.w3.org/TR/html5/tokenization.html#tag-open-state
# and http://www.w3.org/TR/html5/tokenization.html#tag-name-state
# note: if you change tagfind/attrfind remember to update locatestarttagend too
tagfind = re.compile('([a-zA-Z][^\t\n\r\f />\x00]*)(?:\s|/(?!>))*')
# this regex is currently unused, but left for backward compatibility
tagfind_tolerant = re.compile('[a-zA-Z][^\t\n\r\f />\x00]*')

attrfind = re.compile(
    r'((?<=[\'"\s/])[^\s/>][^\s/=>]*)(\s*=+\s*'
    r'(\'[^\']*\'|"[^"]*"|(?![\'"])[^>\s]*))?(?:\s|/(?!>))*')

locatestarttagend = re.compile(r"""
  <[a-zA-Z][^\t\n\r\f />\x00]*          # tag name
  (?:[\s/]*                             # optional whitespace before attribute name
    (?:(?<=['"\s/])[^\s/>][^\s/=>]*     # attribute name
      (?:\s*=+\s*                       # value indicator
        (?:'[^']*'                      # LITA-enclosed value
          |"[^"]*"                      # LIT-enclosed value
          |(?!['"])[^>\s]*              # bare value
         )
       )?(?:\s|/(?!>))*
     )*
   )?
  \s*                                   # trailing whitespace
""", re.VERBOSE)
endendtag = re.compile('>')
# the HTML 5 spec, section 8.1.2.2, doesn't allow spaces between
# </ and the tag name, so maybe this should be fixed
endtagfind = re.compile('</\s*([a-zA-Z][-.a-zA-Z0-9:_]*)\s*>')

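# Illustrative checks (not in the original file) of the patterns above:
#
#   starttagopen.match('<a href="x">')   # matches: '<' plus a letter
#   starttagopen.match('<3')             # None: digits cannot start a tag
#   endtagfind.match('</div >')          # matches a complete end tag
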

class HTMLParseError(Exception):
    """Exception raised for all parse errors."""

    def __init__(self, msg, position=(None, None)):
        assert msg
        self.msg = msg
        self.lineno = position[0]
        self.offset = position[1]

    def __str__(self):
        result = self.msg
        if self.lineno is not None:
            result = result + ", at line %d" % self.lineno
        if self.offset is not None:
            result = result + ", column %d" % (self.offset + 1)
        return result


class HTMLParser(markupbase.ParserBase):
    """Find tags and other markup and call handler functions.

    Usage:
        p = HTMLParser()
        p.feed(data)
        ...
        p.close()

    Start tags are handled by calling self.handle_starttag() or
    self.handle_startendtag(); end tags by self.handle_endtag().  The
    data between tags is passed from the parser to the derived class
    by calling self.handle_data() with the data as argument (the data
    may be split up in arbitrary chunks).  Entity references are
    passed by calling self.handle_entityref() with the entity
    reference as the argument.  Numeric character references are
    passed to self.handle_charref() with the string containing the
    reference as the argument.
    """

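    # A minimal subclass sketch (not part of the original file):
    #
    #   class TitleParser(HTMLParser):
    #       def handle_starttag(self, tag, attrs):
    #           self.in_title = (tag == 'title')
    #       def handle_data(self, data):
    #           if getattr(self, 'in_title', False):
    #               print data
    #
    #   TitleParser().feed('<title>Hacker News</title>')  # prints: Hacker News
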
    CDATA_CONTENT_ELEMENTS = ("script", "style")


    def __init__(self):
        """Initialize and reset this instance."""
        self.reset()

    def reset(self):
        """Reset this instance.  Loses all unprocessed data."""
        self.rawdata = ''
        self.lasttag = '???'
        self.interesting = interesting_normal
        self.cdata_elem = None
        markupbase.ParserBase.reset(self)

    def feed(self, data):
        r"""Feed data to the parser.

        Call this as often as you want, with as little or as much text
        as you want (may include '\n').
        """
        self.rawdata = self.rawdata + data
        self.goahead(0)

    def close(self):
        """Handle any buffered data."""
        self.goahead(1)

    def error(self, message):
        raise HTMLParseError(message, self.getpos())

    __starttag_text = None

    def get_starttag_text(self):
        """Return full source of start tag: '<...>'."""
        return self.__starttag_text

    def set_cdata_mode(self, elem):
        self.cdata_elem = elem.lower()
        self.interesting = re.compile(r'</\s*%s\s*>' % self.cdata_elem, re.I)

    def clear_cdata_mode(self):
        self.interesting = interesting_normal
        self.cdata_elem = None

    # Internal -- handle data as far as reasonable.  May leave state
    # and data to be processed by a subsequent call.  If 'end' is
    # true, force handling all data as if followed by EOF marker.
    def goahead(self, end):
        rawdata = self.rawdata
        i = 0
        n = len(rawdata)
        while i < n:
            match = self.interesting.search(rawdata, i) # < or &
            if match:
                j = match.start()
            else:
                if self.cdata_elem:
                    break
                j = n
            if i < j: self.handle_data(rawdata[i:j])
            i = self.updatepos(i, j)
            if i == n: break
            startswith = rawdata.startswith
            if startswith('<', i):
                if starttagopen.match(rawdata, i): # < + letter
                    k = self.parse_starttag(i)
                elif startswith("</", i):
                    k = self.parse_endtag(i)
                elif startswith("<!--", i):
                    k = self.parse_comment(i)
                elif startswith("<?", i):
                    k = self.parse_pi(i)
                elif startswith("<!", i):
                    k = self.parse_html_declaration(i)
                elif (i + 1) < n:
                    self.handle_data("<")
                    k = i + 1
                else:
                    break
                if k < 0:
                    if not end:
                        break
                    k = rawdata.find('>', i + 1)
                    if k < 0:
                        k = rawdata.find('<', i + 1)
                        if k < 0:
                            k = i + 1
                    else:
                        k += 1
                    self.handle_data(rawdata[i:k])
                i = self.updatepos(i, k)
            elif startswith("&#", i):
                match = charref.match(rawdata, i)
                if match:
                    name = match.group()[2:-1]
                    self.handle_charref(name)
                    k = match.end()
                    if not startswith(';', k-1):
                        k = k - 1
                    i = self.updatepos(i, k)
                    continue
                else:
                    if ";" in rawdata[i:]:  # bail by consuming '&#'
                        self.handle_data(rawdata[i:i+2])
                        i = self.updatepos(i, i+2)
                    break
            elif startswith('&', i):
                match = entityref.match(rawdata, i)
                if match:
                    name = match.group(1)
                    self.handle_entityref(name)
                    k = match.end()
                    if not startswith(';', k-1):
                        k = k - 1
                    i = self.updatepos(i, k)
                    continue
                match = incomplete.match(rawdata, i)
                if match:
                    # match.group() will contain at least 2 chars
                    if end and match.group() == rawdata[i:]:
                        self.error("EOF in middle of entity or char ref")
                    # incomplete
                    break
                elif (i + 1) < n:
                    # not the end of the buffer, and can't be confused
                    # with some other construct
                    self.handle_data("&")
                    i = self.updatepos(i, i + 1)
                else:
                    break
            else:
                assert 0, "interesting.search() lied"
        # end while
        if end and i < n and not self.cdata_elem:
            self.handle_data(rawdata[i:n])
            i = self.updatepos(i, n)
        self.rawdata = rawdata[i:]

    # Internal -- parse html declarations, return length or -1 if not terminated
    # See w3.org/TR/html5/tokenization.html#markup-declaration-open-state
    # See also parse_declaration in _markupbase
    def parse_html_declaration(self, i):
        rawdata = self.rawdata
        if rawdata[i:i+2] != '<!':
            self.error('unexpected call to parse_html_declaration()')
        if rawdata[i:i+4] == '<!--':
            # this case is actually already handled in goahead()
            return self.parse_comment(i)
        elif rawdata[i:i+3] == '<![':
            return self.parse_marked_section(i)
        elif rawdata[i:i+9].lower() == '<!doctype':
            # find the closing >
            gtpos = rawdata.find('>', i+9)
            if gtpos == -1:
                return -1
            self.handle_decl(rawdata[i+2:gtpos])
            return gtpos+1
        else:
            return self.parse_bogus_comment(i)

    # Internal -- parse bogus comment, return length or -1 if not terminated
    # see http://www.w3.org/TR/html5/tokenization.html#bogus-comment-state
    def parse_bogus_comment(self, i, report=1):
        rawdata = self.rawdata
        if rawdata[i:i+2] not in ('<!', '</'):
            self.error('unexpected call to parse_comment()')
        pos = rawdata.find('>', i+2)
        if pos == -1:
            return -1
        if report:
            self.handle_comment(rawdata[i+2:pos])
        return pos + 1

    # Internal -- parse processing instr, return end or -1 if not terminated
    def parse_pi(self, i):
        rawdata = self.rawdata
        assert rawdata[i:i+2] == '<?', 'unexpected call to parse_pi()'
        match = piclose.search(rawdata, i+2) # >
        if not match:
            return -1
        j = match.start()
        self.handle_pi(rawdata[i+2: j])
        j = match.end()
        return j

    # Internal -- handle starttag, return end or -1 if not terminated
    def parse_starttag(self, i):
        self.__starttag_text = None
        endpos = self.check_for_whole_start_tag(i)
        if endpos < 0:
|
||||
return endpos
|
||||
rawdata = self.rawdata
|
||||
self.__starttag_text = rawdata[i:endpos]
|
||||
|
||||
# Now parse the data between i+1 and j into a tag and attrs
|
||||
attrs = []
|
||||
match = tagfind.match(rawdata, i+1)
|
||||
assert match, 'unexpected call to parse_starttag()'
|
||||
k = match.end()
|
||||
self.lasttag = tag = match.group(1).lower()
|
||||
|
||||
while k < endpos:
|
||||
m = attrfind.match(rawdata, k)
|
||||
if not m:
|
||||
break
|
||||
attrname, rest, attrvalue = m.group(1, 2, 3)
|
||||
if not rest:
|
||||
attrvalue = None
|
||||
elif attrvalue[:1] == '\'' == attrvalue[-1:] or \
|
||||
attrvalue[:1] == '"' == attrvalue[-1:]:
|
||||
attrvalue = attrvalue[1:-1]
|
||||
if attrvalue:
|
||||
attrvalue = self.unescape(attrvalue)
|
||||
attrs.append((attrname.lower(), attrvalue))
|
||||
k = m.end()
|
||||
|
||||
end = rawdata[k:endpos].strip()
|
||||
if end not in (">", "/>"):
|
||||
lineno, offset = self.getpos()
|
||||
if "\n" in self.__starttag_text:
|
||||
lineno = lineno + self.__starttag_text.count("\n")
|
||||
offset = len(self.__starttag_text) \
|
||||
- self.__starttag_text.rfind("\n")
|
||||
else:
|
||||
offset = offset + len(self.__starttag_text)
|
||||
self.handle_data(rawdata[i:endpos])
|
||||
return endpos
|
||||
if end.endswith('/>'):
|
||||
# XHTML-style empty tag: <span attr="value" />
|
||||
self.handle_startendtag(tag, attrs)
|
||||
else:
|
||||
self.handle_starttag(tag, attrs)
|
||||
if tag in self.CDATA_CONTENT_ELEMENTS:
|
||||
self.set_cdata_mode(tag)
|
||||
return endpos
|
||||
|
||||
# Internal -- check to see if we have a complete starttag; return end
|
||||
# or -1 if incomplete.
|
||||
def check_for_whole_start_tag(self, i):
|
||||
rawdata = self.rawdata
|
||||
m = locatestarttagend.match(rawdata, i)
|
||||
if m:
|
||||
j = m.end()
|
||||
next = rawdata[j:j+1]
|
||||
if next == ">":
|
||||
return j + 1
|
||||
if next == "/":
|
||||
if rawdata.startswith("/>", j):
|
||||
return j + 2
|
||||
if rawdata.startswith("/", j):
|
||||
# buffer boundary
|
||||
return -1
|
||||
# else bogus input
|
||||
self.updatepos(i, j + 1)
|
||||
self.error("malformed empty start tag")
|
||||
if next == "":
|
||||
# end of input
|
||||
return -1
|
||||
if next in ("abcdefghijklmnopqrstuvwxyz=/"
|
||||
"ABCDEFGHIJKLMNOPQRSTUVWXYZ"):
|
||||
# end of input in or before attribute value, or we have the
|
||||
# '/' from a '/>' ending
|
||||
return -1
|
||||
if j > i:
|
||||
return j
|
||||
else:
|
||||
return i + 1
|
||||
raise AssertionError("we should not get here!")
|
||||
|
||||
# Internal -- parse endtag, return end or -1 if incomplete
|
||||
def parse_endtag(self, i):
|
||||
rawdata = self.rawdata
|
||||
assert rawdata[i:i+2] == "</", "unexpected call to parse_endtag"
|
||||
match = endendtag.search(rawdata, i+1) # >
|
||||
if not match:
|
||||
return -1
|
||||
gtpos = match.end()
|
||||
match = endtagfind.match(rawdata, i) # </ + tag + >
|
||||
if not match:
|
||||
if self.cdata_elem is not None:
|
||||
self.handle_data(rawdata[i:gtpos])
|
||||
return gtpos
|
||||
# find the name: w3.org/TR/html5/tokenization.html#tag-name-state
|
||||
namematch = tagfind.match(rawdata, i+2)
|
||||
if not namematch:
|
||||
# w3.org/TR/html5/tokenization.html#end-tag-open-state
|
||||
if rawdata[i:i+3] == '</>':
|
||||
return i+3
|
||||
else:
|
||||
return self.parse_bogus_comment(i)
|
||||
tagname = namematch.group(1).lower()
|
||||
# consume and ignore other stuff between the name and the >
|
||||
# Note: this is not 100% correct, since we might have things like
|
||||
# </tag attr=">">, but looking for > after tha name should cover
|
||||
# most of the cases and is much simpler
|
||||
gtpos = rawdata.find('>', namematch.end())
|
||||
self.handle_endtag(tagname)
|
||||
return gtpos+1
|
||||
|
||||
elem = match.group(1).lower() # script or style
|
||||
if self.cdata_elem is not None:
|
||||
if elem != self.cdata_elem:
|
||||
self.handle_data(rawdata[i:gtpos])
|
||||
return gtpos
|
||||
|
||||
self.handle_endtag(elem)
|
||||
self.clear_cdata_mode()
|
||||
return gtpos
|
||||
|
||||
# Overridable -- finish processing of start+end tag: <tag.../>
|
||||
def handle_startendtag(self, tag, attrs):
|
||||
self.handle_starttag(tag, attrs)
|
||||
self.handle_endtag(tag)
|
||||
|
||||
# Overridable -- handle start tag
|
||||
def handle_starttag(self, tag, attrs):
|
||||
pass
|
||||
|
||||
# Overridable -- handle end tag
|
||||
def handle_endtag(self, tag):
|
||||
pass
|
||||
|
||||
# Overridable -- handle character reference
|
||||
def handle_charref(self, name):
|
||||
pass
|
||||
|
||||
# Overridable -- handle entity reference
|
||||
def handle_entityref(self, name):
|
||||
pass
|
||||
|
||||
# Overridable -- handle data
|
||||
def handle_data(self, data):
|
||||
pass
|
||||
|
||||
# Overridable -- handle comment
|
||||
def handle_comment(self, data):
|
||||
pass
|
||||
|
||||
# Overridable -- handle declaration
|
||||
def handle_decl(self, decl):
|
||||
pass
|
||||
|
||||
# Overridable -- handle processing instruction
|
||||
def handle_pi(self, data):
|
||||
pass
|
||||
|
||||
def unknown_decl(self, data):
|
||||
pass
|
||||
|
||||
# Internal -- helper to remove special character quoting
|
||||
entitydefs = None
|
||||
def unescape(self, s):
|
||||
if '&' not in s:
|
||||
return s
|
||||
def replaceEntities(s):
|
||||
s = s.groups()[0]
|
||||
try:
|
||||
if s[0] == "#":
|
||||
s = s[1:]
|
||||
if s[0] in ['x','X']:
|
||||
c = int(s[1:], 16)
|
||||
else:
|
||||
c = int(s)
|
||||
return unichr(c)
|
||||
except ValueError:
|
||||
return '&#'+s+';'
|
||||
else:
|
||||
# Cannot use name2codepoint directly, because HTMLParser supports apos,
|
||||
# which is not part of HTML 4
|
||||
import htmlentitydefs
|
||||
if HTMLParser.entitydefs is None:
|
||||
entitydefs = HTMLParser.entitydefs = {'apos':u"'"}
|
||||
for k, v in htmlentitydefs.name2codepoint.iteritems():
|
||||
entitydefs[k] = unichr(v)
|
||||
try:
|
||||
return self.entitydefs[s]
|
||||
except KeyError:
|
||||
return '&'+s+';'
|
||||
|
||||
return re.sub(r"&(#?[xX]?(?:[0-9a-fA-F]+|\w{1,8}));", replaceEntities, s)
|
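The class above is callback-driven: feed() buffers text and goahead() walks it, dispatching to the overridable handle_* hooks. A minimal sketch (not part of the commit) of that pattern, collecting href attributes the way a link scraper would:

from HTMLParser import HTMLParser

class LinkCollector(HTMLParser):
    def __init__(self):
        HTMLParser.__init__(self)  # old-style class, so no super()
        self.links = []

    def handle_starttag(self, tag, attrs):
        # attrs arrives as a list of (name, value) pairs, already unescaped
        if tag == "a":
            for name, value in attrs:
                if name == "href":
                    self.links.append(value)

p = LinkCollector()
p.feed('<a href="https://news.ycombinator.com/">HN</a>')
p.close()
print p.links  # ['https://news.ycombinator.com/']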
Binary file not shown.
Binary file not shown.
244
PythonHome/Lib/Queue.py
Normal file
@ -0,0 +1,244 @@
"""A multi-producer, multi-consumer queue."""

from time import time as _time
try:
    import threading as _threading
except ImportError:
    import dummy_threading as _threading
from collections import deque
import heapq

__all__ = ['Empty', 'Full', 'Queue', 'PriorityQueue', 'LifoQueue']

class Empty(Exception):
    "Exception raised by Queue.get(block=0)/get_nowait()."
    pass

class Full(Exception):
    "Exception raised by Queue.put(block=0)/put_nowait()."
    pass

class Queue:
    """Create a queue object with a given maximum size.

    If maxsize is <= 0, the queue size is infinite.
    """
    def __init__(self, maxsize=0):
        self.maxsize = maxsize
        self._init(maxsize)
        # mutex must be held whenever the queue is mutating.  All methods
        # that acquire mutex must release it before returning.  mutex
        # is shared between the three conditions, so acquiring and
        # releasing the conditions also acquires and releases mutex.
        self.mutex = _threading.Lock()
        # Notify not_empty whenever an item is added to the queue; a
        # thread waiting to get is notified then.
        self.not_empty = _threading.Condition(self.mutex)
        # Notify not_full whenever an item is removed from the queue;
        # a thread waiting to put is notified then.
        self.not_full = _threading.Condition(self.mutex)
        # Notify all_tasks_done whenever the number of unfinished tasks
        # drops to zero; thread waiting to join() is notified to resume
        self.all_tasks_done = _threading.Condition(self.mutex)
        self.unfinished_tasks = 0

    def task_done(self):
        """Indicate that a formerly enqueued task is complete.

        Used by Queue consumer threads.  For each get() used to fetch a task,
        a subsequent call to task_done() tells the queue that the processing
        on the task is complete.

        If a join() is currently blocking, it will resume when all items
        have been processed (meaning that a task_done() call was received
        for every item that had been put() into the queue).

        Raises a ValueError if called more times than there were items
        placed in the queue.
        """
        self.all_tasks_done.acquire()
        try:
            unfinished = self.unfinished_tasks - 1
            if unfinished <= 0:
                if unfinished < 0:
                    raise ValueError('task_done() called too many times')
                self.all_tasks_done.notify_all()
            self.unfinished_tasks = unfinished
        finally:
            self.all_tasks_done.release()

    def join(self):
        """Blocks until all items in the Queue have been gotten and processed.

        The count of unfinished tasks goes up whenever an item is added to the
        queue. The count goes down whenever a consumer thread calls task_done()
        to indicate the item was retrieved and all work on it is complete.

        When the count of unfinished tasks drops to zero, join() unblocks.
        """
        self.all_tasks_done.acquire()
        try:
            while self.unfinished_tasks:
                self.all_tasks_done.wait()
        finally:
            self.all_tasks_done.release()

    def qsize(self):
        """Return the approximate size of the queue (not reliable!)."""
        self.mutex.acquire()
        n = self._qsize()
        self.mutex.release()
        return n

    def empty(self):
        """Return True if the queue is empty, False otherwise (not reliable!)."""
        self.mutex.acquire()
        n = not self._qsize()
        self.mutex.release()
        return n

    def full(self):
        """Return True if the queue is full, False otherwise (not reliable!)."""
        self.mutex.acquire()
        n = 0 < self.maxsize == self._qsize()
        self.mutex.release()
        return n

    def put(self, item, block=True, timeout=None):
        """Put an item into the queue.

        If optional args 'block' is true and 'timeout' is None (the default),
        block if necessary until a free slot is available. If 'timeout' is
        a non-negative number, it blocks at most 'timeout' seconds and raises
        the Full exception if no free slot was available within that time.
        Otherwise ('block' is false), put an item on the queue if a free slot
        is immediately available, else raise the Full exception ('timeout'
        is ignored in that case).
        """
        self.not_full.acquire()
        try:
            if self.maxsize > 0:
                if not block:
                    if self._qsize() == self.maxsize:
                        raise Full
                elif timeout is None:
                    while self._qsize() == self.maxsize:
                        self.not_full.wait()
                elif timeout < 0:
                    raise ValueError("'timeout' must be a non-negative number")
                else:
                    endtime = _time() + timeout
                    while self._qsize() == self.maxsize:
                        remaining = endtime - _time()
                        if remaining <= 0.0:
                            raise Full
                        self.not_full.wait(remaining)
            self._put(item)
            self.unfinished_tasks += 1
            self.not_empty.notify()
        finally:
            self.not_full.release()

    def put_nowait(self, item):
        """Put an item into the queue without blocking.

        Only enqueue the item if a free slot is immediately available.
        Otherwise raise the Full exception.
        """
        return self.put(item, False)

    def get(self, block=True, timeout=None):
        """Remove and return an item from the queue.

        If optional args 'block' is true and 'timeout' is None (the default),
        block if necessary until an item is available. If 'timeout' is
        a non-negative number, it blocks at most 'timeout' seconds and raises
        the Empty exception if no item was available within that time.
        Otherwise ('block' is false), return an item if one is immediately
        available, else raise the Empty exception ('timeout' is ignored
        in that case).
        """
        self.not_empty.acquire()
        try:
            if not block:
                if not self._qsize():
                    raise Empty
            elif timeout is None:
                while not self._qsize():
                    self.not_empty.wait()
            elif timeout < 0:
                raise ValueError("'timeout' must be a non-negative number")
            else:
                endtime = _time() + timeout
                while not self._qsize():
                    remaining = endtime - _time()
                    if remaining <= 0.0:
                        raise Empty
                    self.not_empty.wait(remaining)
            item = self._get()
            self.not_full.notify()
            return item
        finally:
            self.not_empty.release()

    def get_nowait(self):
        """Remove and return an item from the queue without blocking.

        Only get an item if one is immediately available. Otherwise
        raise the Empty exception.
        """
        return self.get(False)

    # Override these methods to implement other queue organizations
    # (e.g. stack or priority queue).
    # These will only be called with appropriate locks held

    # Initialize the queue representation
    def _init(self, maxsize):
        self.queue = deque()

    def _qsize(self, len=len):
        return len(self.queue)

    # Put a new item in the queue
    def _put(self, item):
        self.queue.append(item)

    # Get an item from the queue
    def _get(self):
        return self.queue.popleft()


class PriorityQueue(Queue):
    '''Variant of Queue that retrieves open entries in priority order (lowest first).

    Entries are typically tuples of the form:  (priority number, data).
    '''

    def _init(self, maxsize):
        self.queue = []

    def _qsize(self, len=len):
        return len(self.queue)

    def _put(self, item, heappush=heapq.heappush):
        heappush(self.queue, item)

    def _get(self, heappop=heapq.heappop):
        return heappop(self.queue)


class LifoQueue(Queue):
    '''Variant of Queue that retrieves most recently added entries first.'''

    def _init(self, maxsize):
        self.queue = []

    def _qsize(self, len=len):
        return len(self.queue)

    def _put(self, item):
        self.queue.append(item)

    def _get(self):
        return self.queue.pop()
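A minimal producer/consumer sketch (not part of the commit) of the task_done()/join() protocol documented above: put() increments unfinished_tasks, task_done() decrements it, and join() returns once it reaches zero.

import Queue
import threading

q = Queue.Queue(maxsize=10)

def worker():
    while True:
        item = q.get()       # blocks until an item is available
        print "processed", item
        q.task_done()        # pairs with the get() above

t = threading.Thread(target=worker)
t.daemon = True              # don't keep the process alive for the worker
t.start()

for i in range(5):
    q.put(i)                 # blocks if the queue is full
q.join()                     # returns once every item is task_done()'d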
Binary file not shown.
230
PythonHome/Lib/SimpleHTTPServer.py
Normal file
@ -0,0 +1,230 @@
"""Simple HTTP Server.

This module builds on BaseHTTPServer by implementing the standard GET
and HEAD requests in a fairly straightforward manner.

"""


__version__ = "0.6"

__all__ = ["SimpleHTTPRequestHandler"]

import os
import posixpath
import BaseHTTPServer
import urllib
import cgi
import sys
import shutil
import mimetypes
try:
    from cStringIO import StringIO
except ImportError:
    from StringIO import StringIO


class SimpleHTTPRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):

    """Simple HTTP request handler with GET and HEAD commands.

    This serves files from the current directory and any of its
    subdirectories.  The MIME type for files is determined by
    calling the .guess_type() method.

    The GET and HEAD requests are identical except that the HEAD
    request omits the actual contents of the file.

    """

    server_version = "SimpleHTTP/" + __version__

    def do_GET(self):
        """Serve a GET request."""
        f = self.send_head()
        if f:
            try:
                self.copyfile(f, self.wfile)
            finally:
                f.close()

    def do_HEAD(self):
        """Serve a HEAD request."""
        f = self.send_head()
        if f:
            f.close()

    def send_head(self):
        """Common code for GET and HEAD commands.

        This sends the response code and MIME headers.

        Return value is either a file object (which has to be copied
        to the outputfile by the caller unless the command was HEAD,
        and must be closed by the caller under all circumstances), or
        None, in which case the caller has nothing further to do.

        """
        path = self.translate_path(self.path)
        f = None
        if os.path.isdir(path):
            if not self.path.endswith('/'):
                # redirect browser - doing basically what apache does
                self.send_response(301)
                self.send_header("Location", self.path + "/")
                self.end_headers()
                return None
            for index in "index.html", "index.htm":
                index = os.path.join(path, index)
                if os.path.exists(index):
                    path = index
                    break
            else:
                return self.list_directory(path)
        ctype = self.guess_type(path)
        try:
            # Always read in binary mode. Opening files in text mode may cause
            # newline translations, making the actual size of the content
            # transmitted *less* than the content-length!
            f = open(path, 'rb')
        except IOError:
            self.send_error(404, "File not found")
            return None
        try:
            self.send_response(200)
            self.send_header("Content-type", ctype)
            fs = os.fstat(f.fileno())
            self.send_header("Content-Length", str(fs[6]))
            self.send_header("Last-Modified", self.date_time_string(fs.st_mtime))
            self.end_headers()
            return f
        except:
            f.close()
            raise

    def list_directory(self, path):
        """Helper to produce a directory listing (absent index.html).

        Return value is either a file object, or None (indicating an
        error).  In either case, the headers are sent, making the
        interface the same as for send_head().

        """
        try:
            list = os.listdir(path)
        except os.error:
            self.send_error(404, "No permission to list directory")
            return None
        list.sort(key=lambda a: a.lower())
        f = StringIO()
        displaypath = cgi.escape(urllib.unquote(self.path))
        f.write('<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">')
        f.write("<html>\n<title>Directory listing for %s</title>\n" % displaypath)
        f.write("<body>\n<h2>Directory listing for %s</h2>\n" % displaypath)
        f.write("<hr>\n<ul>\n")
        for name in list:
            fullname = os.path.join(path, name)
            displayname = linkname = name
            # Append / for directories or @ for symbolic links
            if os.path.isdir(fullname):
                displayname = name + "/"
                linkname = name + "/"
            if os.path.islink(fullname):
                displayname = name + "@"
                # Note: a link to a directory displays with @ and links with /
            f.write('<li><a href="%s">%s</a>\n'
                    % (urllib.quote(linkname), cgi.escape(displayname)))
        f.write("</ul>\n<hr>\n</body>\n</html>\n")
        length = f.tell()
        f.seek(0)
        self.send_response(200)
        encoding = sys.getfilesystemencoding()
        self.send_header("Content-type", "text/html; charset=%s" % encoding)
        self.send_header("Content-Length", str(length))
        self.end_headers()
        return f

    def translate_path(self, path):
        """Translate a /-separated PATH to the local filename syntax.

        Components that mean special things to the local file system
        (e.g. drive or directory names) are ignored.  (XXX They should
        probably be diagnosed.)

        """
        # abandon query parameters
        path = path.split('?',1)[0]
        path = path.split('#',1)[0]
        # Don't forget explicit trailing slash when normalizing. Issue17324
        trailing_slash = path.rstrip().endswith('/')
        path = posixpath.normpath(urllib.unquote(path))
        words = path.split('/')
        words = filter(None, words)
        path = os.getcwd()
        for word in words:
            drive, word = os.path.splitdrive(word)
            head, word = os.path.split(word)
            if word in (os.curdir, os.pardir): continue
            path = os.path.join(path, word)
        if trailing_slash:
            path += '/'
        return path

    def copyfile(self, source, outputfile):
        """Copy all data between two file objects.

        The SOURCE argument is a file object open for reading
        (or anything with a read() method) and the DESTINATION
        argument is a file object open for writing (or
        anything with a write() method).

        The only reason for overriding this would be to change
        the block size or perhaps to replace newlines by CRLF
        -- note however that the default server uses this
        to copy binary data as well.

        """
        shutil.copyfileobj(source, outputfile)

    def guess_type(self, path):
        """Guess the type of a file.

        Argument is a PATH (a filename).

        Return value is a string of the form type/subtype,
        usable for a MIME Content-type header.

        The default implementation looks the file's extension
        up in the table self.extensions_map, using application/octet-stream
        as a default; however it would be permissible (if
        slow) to look inside the data to make a better guess.

        """

        base, ext = posixpath.splitext(path)
        if ext in self.extensions_map:
            return self.extensions_map[ext]
        ext = ext.lower()
        if ext in self.extensions_map:
            return self.extensions_map[ext]
        else:
            return self.extensions_map['']

    if not mimetypes.inited:
        mimetypes.init() # try to read system mime.types
    extensions_map = mimetypes.types_map.copy()
    extensions_map.update({
        '': 'application/octet-stream', # Default
        '.py': 'text/plain',
        '.c': 'text/plain',
        '.h': 'text/plain',
        })


def test(HandlerClass = SimpleHTTPRequestHandler,
         ServerClass = BaseHTTPServer.HTTPServer):
    BaseHTTPServer.test(HandlerClass, ServerClass)


if __name__ == '__main__':
    test()
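A minimal sketch (not part of the commit) of wiring the handler above into BaseHTTPServer, which is essentially what the test() hook does; in Python 2 the same module can also be started from the shell with "python -m SimpleHTTPServer 8000":

import BaseHTTPServer
import SimpleHTTPServer

server = BaseHTTPServer.HTTPServer(
    ("localhost", 8000), SimpleHTTPServer.SimpleHTTPRequestHandler)
server.serve_forever()  # GET/HEAD are served out of the current directory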
Binary file not shown.
708
PythonHome/Lib/SimpleXMLRPCServer.py
Normal file
@ -0,0 +1,708 @@
r"""Simple XML-RPC Server.

This module can be used to create simple XML-RPC servers
by creating a server and either installing functions, a
class instance, or by extending the SimpleXMLRPCServer
class.

It can also be used to handle XML-RPC requests in a CGI
environment using CGIXMLRPCRequestHandler.

A list of possible usage patterns follows:

1. Install functions:

server = SimpleXMLRPCServer(("localhost", 8000))
server.register_function(pow)
server.register_function(lambda x,y: x+y, 'add')
server.serve_forever()

2. Install an instance:

class MyFuncs:
    def __init__(self):
        # make all of the string functions available through
        # string.func_name
        import string
        self.string = string
    def _listMethods(self):
        # implement this method so that system.listMethods
        # knows to advertise the strings methods
        return list_public_methods(self) + \
                ['string.' + method for method in list_public_methods(self.string)]
    def pow(self, x, y): return pow(x, y)
    def add(self, x, y) : return x + y

server = SimpleXMLRPCServer(("localhost", 8000))
server.register_introspection_functions()
server.register_instance(MyFuncs())
server.serve_forever()

3. Install an instance with custom dispatch method:

class Math:
    def _listMethods(self):
        # this method must be present for system.listMethods
        # to work
        return ['add', 'pow']
    def _methodHelp(self, method):
        # this method must be present for system.methodHelp
        # to work
        if method == 'add':
            return "add(2,3) => 5"
        elif method == 'pow':
            return "pow(x, y[, z]) => number"
        else:
            # By convention, return empty
            # string if no help is available
            return ""
    def _dispatch(self, method, params):
        if method == 'pow':
            return pow(*params)
        elif method == 'add':
            return params[0] + params[1]
        else:
            raise 'bad method'

server = SimpleXMLRPCServer(("localhost", 8000))
server.register_introspection_functions()
server.register_instance(Math())
server.serve_forever()

4. Subclass SimpleXMLRPCServer:

class MathServer(SimpleXMLRPCServer):
    def _dispatch(self, method, params):
        try:
            # We are forcing the 'export_' prefix on methods that are
            # callable through XML-RPC to prevent potential security
            # problems
            func = getattr(self, 'export_' + method)
        except AttributeError:
            raise Exception('method "%s" is not supported' % method)
        else:
            return func(*params)

    def export_add(self, x, y):
        return x + y

server = MathServer(("localhost", 8000))
server.serve_forever()

5. CGI script:

server = CGIXMLRPCRequestHandler()
server.register_function(pow)
server.handle_request()
"""

# Written by Brian Quinlan (brian@sweetapp.com).
# Based on code written by Fredrik Lundh.

import xmlrpclib
from xmlrpclib import Fault
import SocketServer
import BaseHTTPServer
import sys
import os
import traceback
import re
try:
    import fcntl
except ImportError:
    fcntl = None

def resolve_dotted_attribute(obj, attr, allow_dotted_names=True):
    """resolve_dotted_attribute(a, 'b.c.d') => a.b.c.d

    Resolves a dotted attribute name to an object.  Raises
    an AttributeError if any attribute in the chain starts with a '_'.

    If the optional allow_dotted_names argument is false, dots are not
    supported and this function operates similar to getattr(obj, attr).
    """

    if allow_dotted_names:
        attrs = attr.split('.')
    else:
        attrs = [attr]

    for i in attrs:
        if i.startswith('_'):
            raise AttributeError(
                'attempt to access private attribute "%s"' % i
                )
        else:
            obj = getattr(obj,i)
    return obj

def list_public_methods(obj):
    """Returns a list of attribute strings, found in the specified
    object, which represent callable attributes"""

    return [member for member in dir(obj)
                if not member.startswith('_') and
                    hasattr(getattr(obj, member), '__call__')]

def remove_duplicates(lst):
    """remove_duplicates([2,2,2,1,3,3]) => [3,1,2]

    Returns a copy of a list without duplicates. Every list
    item must be hashable and the order of the items in the
    resulting list is not defined.
    """
    u = {}
    for x in lst:
        u[x] = 1

    return u.keys()

class SimpleXMLRPCDispatcher:
    """Mix-in class that dispatches XML-RPC requests.

    This class is used to register XML-RPC method handlers
    and then to dispatch them. This class doesn't need to be
    instanced directly when used by SimpleXMLRPCServer but it
    can be instanced when used by the MultiPathXMLRPCServer.
    """

    def __init__(self, allow_none=False, encoding=None):
        self.funcs = {}
        self.instance = None
        self.allow_none = allow_none
        self.encoding = encoding

    def register_instance(self, instance, allow_dotted_names=False):
        """Registers an instance to respond to XML-RPC requests.

        Only one instance can be installed at a time.

        If the registered instance has a _dispatch method then that
        method will be called with the name of the XML-RPC method and
        its parameters as a tuple
        e.g. instance._dispatch('add',(2,3))

        If the registered instance does not have a _dispatch method
        then the instance will be searched to find a matching method
        and, if found, will be called. Methods beginning with an '_'
        are considered private and will not be called by
        SimpleXMLRPCServer.

        If a registered function matches a XML-RPC request, then it
        will be called instead of the registered instance.

        If the optional allow_dotted_names argument is true and the
        instance does not have a _dispatch method, method names
        containing dots are supported and resolved, as long as none of
        the name segments start with an '_'.

            *** SECURITY WARNING: ***

            Enabling the allow_dotted_names options allows intruders
            to access your module's global variables and may allow
            intruders to execute arbitrary code on your machine.  Only
            use this option on a secure, closed network.

        """

        self.instance = instance
        self.allow_dotted_names = allow_dotted_names

    def register_function(self, function, name = None):
        """Registers a function to respond to XML-RPC requests.

        The optional name argument can be used to set a Unicode name
        for the function.
        """

        if name is None:
            name = function.__name__
        self.funcs[name] = function

    def register_introspection_functions(self):
        """Registers the XML-RPC introspection methods in the system
        namespace.

        see http://xmlrpc.usefulinc.com/doc/reserved.html
        """

        self.funcs.update({'system.listMethods' : self.system_listMethods,
                      'system.methodSignature' : self.system_methodSignature,
                      'system.methodHelp' : self.system_methodHelp})

    def register_multicall_functions(self):
        """Registers the XML-RPC multicall method in the system
        namespace.

        see http://www.xmlrpc.com/discuss/msgReader$1208"""

        self.funcs.update({'system.multicall' : self.system_multicall})

    def _marshaled_dispatch(self, data, dispatch_method = None, path = None):
        """Dispatches an XML-RPC method from marshalled (XML) data.

        XML-RPC methods are dispatched from the marshalled (XML) data
        using the _dispatch method and the result is returned as
        marshalled data. For backwards compatibility, a dispatch
        function can be provided as an argument (see comment in
        SimpleXMLRPCRequestHandler.do_POST) but overriding the
        existing method through subclassing is the preferred means
        of changing method dispatch behavior.
        """

        try:
            params, method = xmlrpclib.loads(data)

            # generate response
            if dispatch_method is not None:
                response = dispatch_method(method, params)
            else:
                response = self._dispatch(method, params)
            # wrap response in a singleton tuple
            response = (response,)
            response = xmlrpclib.dumps(response, methodresponse=1,
                                       allow_none=self.allow_none, encoding=self.encoding)
        except Fault, fault:
            response = xmlrpclib.dumps(fault, allow_none=self.allow_none,
                                       encoding=self.encoding)
        except:
            # report exception back to server
            exc_type, exc_value, exc_tb = sys.exc_info()
            response = xmlrpclib.dumps(
                xmlrpclib.Fault(1, "%s:%s" % (exc_type, exc_value)),
                encoding=self.encoding, allow_none=self.allow_none,
                )

        return response

    def system_listMethods(self):
        """system.listMethods() => ['add', 'subtract', 'multiple']

        Returns a list of the methods supported by the server."""

        methods = self.funcs.keys()
        if self.instance is not None:
            # Instance can implement _listMethod to return a list of
            # methods
            if hasattr(self.instance, '_listMethods'):
                methods = remove_duplicates(
                        methods + self.instance._listMethods()
                    )
            # if the instance has a _dispatch method then we
            # don't have enough information to provide a list
            # of methods
            elif not hasattr(self.instance, '_dispatch'):
                methods = remove_duplicates(
                        methods + list_public_methods(self.instance)
                    )
        methods.sort()
        return methods

    def system_methodSignature(self, method_name):
        """system.methodSignature('add') => [double, int, int]

        Returns a list describing the signature of the method. In the
        above example, the add method takes two integers as arguments
        and returns a double result.

        This server does NOT support system.methodSignature."""

        # See http://xmlrpc.usefulinc.com/doc/sysmethodsig.html

        return 'signatures not supported'

    def system_methodHelp(self, method_name):
        """system.methodHelp('add') => "Adds two integers together"

        Returns a string containing documentation for the specified method."""

        method = None
        if method_name in self.funcs:
            method = self.funcs[method_name]
        elif self.instance is not None:
            # Instance can implement _methodHelp to return help for a method
            if hasattr(self.instance, '_methodHelp'):
                return self.instance._methodHelp(method_name)
            # if the instance has a _dispatch method then we
            # don't have enough information to provide help
            elif not hasattr(self.instance, '_dispatch'):
                try:
                    method = resolve_dotted_attribute(
                                self.instance,
                                method_name,
                                self.allow_dotted_names
                                )
                except AttributeError:
                    pass

        # Note that we aren't checking that the method actually
        # be a callable object of some kind
        if method is None:
            return ""
        else:
            import pydoc
            return pydoc.getdoc(method)

    def system_multicall(self, call_list):
        """system.multicall([{'methodName': 'add', 'params': [2, 2]}, ...]) => \
[[4], ...]

        Allows the caller to package multiple XML-RPC calls into a single
        request.

        See http://www.xmlrpc.com/discuss/msgReader$1208
        """

        results = []
        for call in call_list:
            method_name = call['methodName']
            params = call['params']

            try:
                # XXX A marshalling error in any response will fail the entire
                # multicall. If someone cares they should fix this.
                results.append([self._dispatch(method_name, params)])
            except Fault, fault:
                results.append(
                    {'faultCode' : fault.faultCode,
                     'faultString' : fault.faultString}
                    )
            except:
                exc_type, exc_value, exc_tb = sys.exc_info()
                results.append(
                    {'faultCode' : 1,
                     'faultString' : "%s:%s" % (exc_type, exc_value)}
                    )
        return results

    def _dispatch(self, method, params):
        """Dispatches the XML-RPC method.

        XML-RPC calls are forwarded to a registered function that
        matches the called XML-RPC method name. If no such function
        exists then the call is forwarded to the registered instance,
        if available.

        If the registered instance has a _dispatch method then that
        method will be called with the name of the XML-RPC method and
        its parameters as a tuple
        e.g. instance._dispatch('add',(2,3))

        If the registered instance does not have a _dispatch method
        then the instance will be searched to find a matching method
        and, if found, will be called.

        Methods beginning with an '_' are considered private and will
        not be called.
        """

        func = None
        try:
            # check to see if a matching function has been registered
            func = self.funcs[method]
        except KeyError:
            if self.instance is not None:
                # check for a _dispatch method
                if hasattr(self.instance, '_dispatch'):
                    return self.instance._dispatch(method, params)
                else:
                    # call instance method directly
                    try:
                        func = resolve_dotted_attribute(
                            self.instance,
                            method,
                            self.allow_dotted_names
                            )
                    except AttributeError:
                        pass

        if func is not None:
            return func(*params)
        else:
            raise Exception('method "%s" is not supported' % method)

class SimpleXMLRPCRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
    """Simple XML-RPC request handler class.

    Handles all HTTP POST requests and attempts to decode them as
    XML-RPC requests.
    """

    # Class attribute listing the accessible path components;
    # paths not on this list will result in a 404 error.
    rpc_paths = ('/', '/RPC2')

    #if not None, encode responses larger than this, if possible
    encode_threshold = 1400 #a common MTU

    #Override from StreamRequestHandler: full buffering of output
    #and no Nagle.
    wbufsize = -1
    disable_nagle_algorithm = True

    # a re to match a gzip Accept-Encoding
    aepattern = re.compile(r"""
                            \s* ([^\s;]+) \s*            #content-coding
                            (;\s* q \s*=\s* ([0-9\.]+))? #q
                            """, re.VERBOSE | re.IGNORECASE)

    def accept_encodings(self):
        r = {}
        ae = self.headers.get("Accept-Encoding", "")
        for e in ae.split(","):
            match = self.aepattern.match(e)
            if match:
                v = match.group(3)
                v = float(v) if v else 1.0
                r[match.group(1)] = v
        return r

    def is_rpc_path_valid(self):
        if self.rpc_paths:
            return self.path in self.rpc_paths
        else:
            # If .rpc_paths is empty, just assume all paths are legal
            return True

    def do_POST(self):
        """Handles the HTTP POST request.

        Attempts to interpret all HTTP POST requests as XML-RPC calls,
        which are forwarded to the server's _dispatch method for handling.
        """

        # Check that the path is legal
        if not self.is_rpc_path_valid():
            self.report_404()
            return

        try:
            # Get arguments by reading body of request.
            # We read this in chunks to avoid straining
            # socket.read(); around the 10 or 15Mb mark, some platforms
            # begin to have problems (bug #792570).
            max_chunk_size = 10*1024*1024
            size_remaining = int(self.headers["content-length"])
            L = []
            while size_remaining:
                chunk_size = min(size_remaining, max_chunk_size)
                chunk = self.rfile.read(chunk_size)
                if not chunk:
                    break
                L.append(chunk)
                size_remaining -= len(L[-1])
            data = ''.join(L)

            data = self.decode_request_content(data)
            if data is None:
                return #response has been sent

            # In previous versions of SimpleXMLRPCServer, _dispatch
            # could be overridden in this class, instead of in
            # SimpleXMLRPCDispatcher. To maintain backwards compatibility,
            # check to see if a subclass implements _dispatch and dispatch
            # using that method if present.
            response = self.server._marshaled_dispatch(
                    data, getattr(self, '_dispatch', None), self.path
                )
        except Exception, e: # This should only happen if the module is buggy
            # internal error, report as HTTP server error
            self.send_response(500)

            # Send information about the exception if requested
            if hasattr(self.server, '_send_traceback_header') and \
                    self.server._send_traceback_header:
                self.send_header("X-exception", str(e))
                self.send_header("X-traceback", traceback.format_exc())

            self.send_header("Content-length", "0")
            self.end_headers()
        else:
            # got a valid XML RPC response
            self.send_response(200)
            self.send_header("Content-type", "text/xml")
            if self.encode_threshold is not None:
                if len(response) > self.encode_threshold:
                    q = self.accept_encodings().get("gzip", 0)
                    if q:
                        try:
                            response = xmlrpclib.gzip_encode(response)
                            self.send_header("Content-Encoding", "gzip")
                        except NotImplementedError:
                            pass
            self.send_header("Content-length", str(len(response)))
            self.end_headers()
            self.wfile.write(response)

    def decode_request_content(self, data):
        #support gzip encoding of request
        encoding = self.headers.get("content-encoding", "identity").lower()
        if encoding == "identity":
            return data
        if encoding == "gzip":
            try:
                return xmlrpclib.gzip_decode(data)
            except NotImplementedError:
                self.send_response(501, "encoding %r not supported" % encoding)
            except ValueError:
                self.send_response(400, "error decoding gzip content")
        else:
            self.send_response(501, "encoding %r not supported" % encoding)
        self.send_header("Content-length", "0")
        self.end_headers()

    def report_404 (self):
        # Report a 404 error
        self.send_response(404)
        response = 'No such page'
        self.send_header("Content-type", "text/plain")
        self.send_header("Content-length", str(len(response)))
        self.end_headers()
        self.wfile.write(response)

    def log_request(self, code='-', size='-'):
        """Selectively log an accepted request."""

        if self.server.logRequests:
            BaseHTTPServer.BaseHTTPRequestHandler.log_request(self, code, size)

class SimpleXMLRPCServer(SocketServer.TCPServer,
                         SimpleXMLRPCDispatcher):
    """Simple XML-RPC server.

    Simple XML-RPC server that allows functions and a single instance
    to be installed to handle requests. The default implementation
    attempts to dispatch XML-RPC calls to the functions or instance
    installed in the server. Override the _dispatch method inherited
    from SimpleXMLRPCDispatcher to change this behavior.
    """

    allow_reuse_address = True

    # Warning: this is for debugging purposes only! Never set this to True in
    # production code, as it will be sending out sensitive information (exception
    # and stack trace details) when exceptions are raised inside
    # SimpleXMLRPCRequestHandler.do_POST
    _send_traceback_header = False

    def __init__(self, addr, requestHandler=SimpleXMLRPCRequestHandler,
                 logRequests=True, allow_none=False, encoding=None, bind_and_activate=True):
        self.logRequests = logRequests

        SimpleXMLRPCDispatcher.__init__(self, allow_none, encoding)
        SocketServer.TCPServer.__init__(self, addr, requestHandler, bind_and_activate)

        # [Bug #1222790] If possible, set close-on-exec flag; if a
        # method spawns a subprocess, the subprocess shouldn't have
        # the listening socket open.
        if fcntl is not None and hasattr(fcntl, 'FD_CLOEXEC'):
            flags = fcntl.fcntl(self.fileno(), fcntl.F_GETFD)
            flags |= fcntl.FD_CLOEXEC
            fcntl.fcntl(self.fileno(), fcntl.F_SETFD, flags)

class MultiPathXMLRPCServer(SimpleXMLRPCServer):
    """Multipath XML-RPC Server
    This specialization of SimpleXMLRPCServer allows the user to create
    multiple Dispatcher instances and assign them to different
    HTTP request paths.  This makes it possible to run two or more
    'virtual XML-RPC servers' at the same port.
    Make sure that the requestHandler accepts the paths in question.
    """
    def __init__(self, addr, requestHandler=SimpleXMLRPCRequestHandler,
                 logRequests=True, allow_none=False, encoding=None, bind_and_activate=True):

        SimpleXMLRPCServer.__init__(self, addr, requestHandler, logRequests, allow_none,
                                    encoding, bind_and_activate)
        self.dispatchers = {}
        self.allow_none = allow_none
        self.encoding = encoding

    def add_dispatcher(self, path, dispatcher):
        self.dispatchers[path] = dispatcher
        return dispatcher

    def get_dispatcher(self, path):
        return self.dispatchers[path]

    def _marshaled_dispatch(self, data, dispatch_method = None, path = None):
        try:
            response = self.dispatchers[path]._marshaled_dispatch(
               data, dispatch_method, path)
        except:
            # report low level exception back to server
            # (each dispatcher should have handled their own
            # exceptions)
            exc_type, exc_value = sys.exc_info()[:2]
            response = xmlrpclib.dumps(
                xmlrpclib.Fault(1, "%s:%s" % (exc_type, exc_value)),
                encoding=self.encoding, allow_none=self.allow_none)
        return response

class CGIXMLRPCRequestHandler(SimpleXMLRPCDispatcher):
    """Simple handler for XML-RPC data passed through CGI."""

    def __init__(self, allow_none=False, encoding=None):
        SimpleXMLRPCDispatcher.__init__(self, allow_none, encoding)

    def handle_xmlrpc(self, request_text):
        """Handle a single XML-RPC request"""

        response = self._marshaled_dispatch(request_text)

        print 'Content-Type: text/xml'
        print 'Content-Length: %d' % len(response)
        print
        sys.stdout.write(response)

    def handle_get(self):
        """Handle a single HTTP GET request.

        Default implementation indicates an error because
        XML-RPC uses the POST method.
        """

        code = 400
        message, explain = \
                 BaseHTTPServer.BaseHTTPRequestHandler.responses[code]

        response = BaseHTTPServer.DEFAULT_ERROR_MESSAGE % \
            {
             'code' : code,
             'message' : message,
             'explain' : explain
            }
        print 'Status: %d %s' % (code, message)
        print 'Content-Type: %s' % BaseHTTPServer.DEFAULT_ERROR_CONTENT_TYPE
        print 'Content-Length: %d' % len(response)
        print
        sys.stdout.write(response)

    def handle_request(self, request_text = None):
        """Handle a single XML-RPC request passed through a CGI post method.

        If no XML data is given then it is read from stdin. The resulting
        XML-RPC response is printed to stdout along with the correct HTTP
        headers.
        """

        if request_text is None and \
            os.environ.get('REQUEST_METHOD', None) == 'GET':
            self.handle_get()
        else:
            # POST data is normally available through stdin
            try:
                length = int(os.environ.get('CONTENT_LENGTH', None))
            except (TypeError, ValueError):
                length = -1
            if request_text is None:
                request_text = sys.stdin.read(length)

            self.handle_xmlrpc(request_text)

if __name__ == '__main__':
    print 'Running XML-RPC server on port 8000'
    server = SimpleXMLRPCServer(("localhost", 8000))
    server.register_function(pow)
    server.register_function(lambda x,y: x+y, 'add')
    server.register_multicall_functions()
    server.serve_forever()
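A minimal client-side sketch (not part of the commit) against the demo server in the __main__ block above, using xmlrpclib from the same standard library:

import xmlrpclib

proxy = xmlrpclib.ServerProxy("http://localhost:8000/")  # '/' is in rpc_paths
print proxy.pow(2, 10)   # 1024
print proxy.add(3, 4)    # 7

# register_multicall_functions() above enables system.multicall, which
# xmlrpclib exposes through MultiCall: several calls, one HTTP POST.
multi = xmlrpclib.MultiCall(proxy)
multi.add(1, 2)
multi.pow(2, 3)
print list(multi())      # [3, 8]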
Binary file not shown.
733
PythonHome/Lib/SocketServer.py
Normal file
@ -0,0 +1,733 @@
"""Generic socket server classes.

This module tries to capture the various aspects of defining a server:

For socket-based servers:

- address family:
        - AF_INET{,6}: IP (Internet Protocol) sockets (default)
        - AF_UNIX: Unix domain sockets
        - others, e.g. AF_DECNET are conceivable (see <socket.h>)
- socket type:
        - SOCK_STREAM (reliable stream, e.g. TCP)
        - SOCK_DGRAM (datagrams, e.g. UDP)

For request-based servers (including socket-based):

- client address verification before further looking at the request
        (This is actually a hook for any processing that needs to look
        at the request before anything else, e.g. logging)
- how to handle multiple requests:
        - synchronous (one request is handled at a time)
        - forking (each request is handled by a new process)
        - threading (each request is handled by a new thread)

The classes in this module favor the server type that is simplest to
write: a synchronous TCP/IP server.  This is bad class design, but
saves some typing.  (There's also the issue that a deep class hierarchy
slows down method lookups.)

There are five classes in an inheritance diagram, four of which represent
synchronous servers of four types:

        +------------+
        | BaseServer |
        +------------+
              |
              v
        +-----------+        +------------------+
        | TCPServer |------->| UnixStreamServer |
        +-----------+        +------------------+
              |
              v
        +-----------+        +--------------------+
        | UDPServer |------->| UnixDatagramServer |
        +-----------+        +--------------------+

Note that UnixDatagramServer derives from UDPServer, not from
UnixStreamServer -- the only difference between an IP and a Unix
stream server is the address family, which is simply repeated in both
unix server classes.

Forking and threading versions of each type of server can be created
using the ForkingMixIn and ThreadingMixIn mix-in classes.  For
instance, a threading UDP server class is created as follows:

        class ThreadingUDPServer(ThreadingMixIn, UDPServer): pass

The Mix-in class must come first, since it overrides a method defined
in UDPServer! Setting the various member variables also changes
the behavior of the underlying server mechanism.

To implement a service, you must derive a class from
BaseRequestHandler and redefine its handle() method.  You can then run
various versions of the service by combining one of the server classes
with your request handler class.

The request handler class must be different for datagram or stream
services.  This can be hidden by using the request handler
subclasses StreamRequestHandler or DatagramRequestHandler.

Of course, you still have to use your head!

For instance, it makes no sense to use a forking server if the service
contains state in memory that can be modified by requests (since the
modifications in the child process would never reach the initial state
kept in the parent process and passed to each child).  In this case,
you can use a threading server, but you will probably have to use
locks to avoid two requests that come in nearly simultaneously from
applying conflicting changes to the server state.

On the other hand, if you are building e.g. an HTTP server, where all
data is stored externally (e.g. in the file system), a synchronous
class will essentially render the service "deaf" while one request is
being handled -- which may be for a very long time if a client is slow
to read all the data it has requested.  Here a threading or forking
server is appropriate.

In some cases, it may be appropriate to process part of a request
synchronously, but to finish processing in a forked child depending on
the request data.  This can be implemented by using a synchronous
server and doing an explicit fork in the request handler class
handle() method.

Another approach to handling multiple simultaneous requests in an
environment that supports neither threads nor fork (or where these are
too expensive or inappropriate for the service) is to maintain an
explicit table of partially finished requests and to use select() to
decide which request to work on next (or whether to handle a new
incoming request).  This is particularly important for stream services
where each client can potentially be connected for a long time (if
threads or subprocesses cannot be used).

Future work:
- Standard classes for Sun RPC (which uses either UDP or TCP)
- Standard mix-in classes to implement various authentication
  and encryption schemes
- Standard framework for select-based multiplexing

XXX Open problems:
- What to do with out-of-band data?

BaseServer:
- split generic "request" functionality out into BaseServer class.
  Copyright (C) 2000  Luke Kenneth Casson Leighton <lkcl@samba.org>

  example: read entries from a SQL database (requires overriding
  get_request() to return a table entry from the database).
  entry is processed by a RequestHandlerClass.

"""

# Author of the BaseServer patch: Luke Kenneth Casson Leighton
|
||||
|
||||
# XXX Warning!
|
||||
# There is a test suite for this module, but it cannot be run by the
|
||||
# standard regression test.
|
||||
# To run it manually, run Lib/test/test_socketserver.py.
|
||||
|
||||
__version__ = "0.4"
|
||||
|
||||
|
||||
import socket
|
||||
import select
|
||||
import sys
|
||||
import os
|
||||
import errno
|
||||
try:
|
||||
import threading
|
||||
except ImportError:
|
||||
import dummy_threading as threading
|
||||
|
||||
__all__ = ["TCPServer","UDPServer","ForkingUDPServer","ForkingTCPServer",
|
||||
"ThreadingUDPServer","ThreadingTCPServer","BaseRequestHandler",
|
||||
"StreamRequestHandler","DatagramRequestHandler",
|
||||
"ThreadingMixIn", "ForkingMixIn"]
|
||||
if hasattr(socket, "AF_UNIX"):
|
||||
__all__.extend(["UnixStreamServer","UnixDatagramServer",
|
||||
"ThreadingUnixStreamServer",
|
||||
"ThreadingUnixDatagramServer"])
|
||||
|
||||
def _eintr_retry(func, *args):
|
||||
"""restart a system call interrupted by EINTR"""
|
||||
while True:
|
||||
try:
|
||||
return func(*args)
|
||||
except (OSError, select.error) as e:
|
||||
if e.args[0] != errno.EINTR:
|
||||
raise
|
||||
|
||||
class BaseServer:
|
||||
|
||||
"""Base class for server classes.
|
||||
|
||||
Methods for the caller:
|
||||
|
||||
- __init__(server_address, RequestHandlerClass)
|
||||
- serve_forever(poll_interval=0.5)
|
||||
- shutdown()
|
||||
- handle_request() # if you do not use serve_forever()
|
||||
- fileno() -> int # for select()
|
||||
|
||||
Methods that may be overridden:
|
||||
|
||||
- server_bind()
|
||||
- server_activate()
|
||||
- get_request() -> request, client_address
|
||||
- handle_timeout()
|
||||
- verify_request(request, client_address)
|
||||
- server_close()
|
||||
- process_request(request, client_address)
|
||||
- shutdown_request(request)
|
||||
- close_request(request)
|
||||
- handle_error()
|
||||
|
||||
Methods for derived classes:
|
||||
|
||||
- finish_request(request, client_address)
|
||||
|
||||
Class variables that may be overridden by derived classes or
|
||||
instances:
|
||||
|
||||
- timeout
|
||||
- address_family
|
||||
- socket_type
|
||||
- allow_reuse_address
|
||||
|
||||
Instance variables:
|
||||
|
||||
- RequestHandlerClass
|
||||
- socket
|
||||
|
||||
"""
|
||||
|
||||
timeout = None
|
||||
|
||||
def __init__(self, server_address, RequestHandlerClass):
|
||||
"""Constructor. May be extended, do not override."""
|
||||
self.server_address = server_address
|
||||
self.RequestHandlerClass = RequestHandlerClass
|
||||
self.__is_shut_down = threading.Event()
|
||||
self.__shutdown_request = False
|
||||
|
||||
def server_activate(self):
|
||||
"""Called by constructor to activate the server.
|
||||
|
||||
May be overridden.
|
||||
|
||||
"""
|
||||
pass
|
||||
|
||||
def serve_forever(self, poll_interval=0.5):
|
||||
"""Handle one request at a time until shutdown.
|
||||
|
||||
Polls for shutdown every poll_interval seconds. Ignores
|
||||
self.timeout. If you need to do periodic tasks, do them in
|
||||
another thread.
|
||||
"""
|
||||
self.__is_shut_down.clear()
|
||||
try:
|
||||
while not self.__shutdown_request:
|
||||
# XXX: Consider using another file descriptor or
|
||||
# connecting to the socket to wake this up instead of
|
||||
# polling. Polling reduces our responsiveness to a
|
||||
# shutdown request and wastes cpu at all other times.
|
||||
r, w, e = _eintr_retry(select.select, [self], [], [],
|
||||
poll_interval)
|
||||
if self in r:
|
||||
self._handle_request_noblock()
|
||||
finally:
|
||||
self.__shutdown_request = False
|
||||
self.__is_shut_down.set()
|
||||
|
||||
def shutdown(self):
|
||||
"""Stops the serve_forever loop.
|
||||
|
||||
Blocks until the loop has finished. This must be called while
|
||||
serve_forever() is running in another thread, or it will
|
||||
deadlock.
|
||||
"""
|
||||
self.__shutdown_request = True
|
||||
self.__is_shut_down.wait()
|
||||
|
||||
# The distinction between handling, getting, processing and
|
||||
# finishing a request is fairly arbitrary. Remember:
|
||||
#
|
||||
# - handle_request() is the top-level call. It calls
|
||||
# select, get_request(), verify_request() and process_request()
|
||||
# - get_request() is different for stream or datagram sockets
|
||||
# - process_request() is the place that may fork a new process
|
||||
# or create a new thread to finish the request
|
||||
# - finish_request() instantiates the request handler class;
|
||||
# this constructor will handle the request all by itself
|
||||
|
||||
def handle_request(self):
|
||||
"""Handle one request, possibly blocking.
|
||||
|
||||
Respects self.timeout.
|
||||
"""
|
||||
# Support people who used socket.settimeout() to escape
|
||||
# handle_request before self.timeout was available.
|
||||
timeout = self.socket.gettimeout()
|
||||
if timeout is None:
|
||||
timeout = self.timeout
|
||||
elif self.timeout is not None:
|
||||
timeout = min(timeout, self.timeout)
|
||||
fd_sets = _eintr_retry(select.select, [self], [], [], timeout)
|
||||
if not fd_sets[0]:
|
||||
self.handle_timeout()
|
||||
return
|
||||
self._handle_request_noblock()
|
||||
|
||||
def _handle_request_noblock(self):
|
||||
"""Handle one request, without blocking.
|
||||
|
||||
I assume that select.select has returned that the socket is
|
||||
readable before this function was called, so there should be
|
||||
no risk of blocking in get_request().
|
||||
"""
|
||||
try:
|
||||
request, client_address = self.get_request()
|
||||
except socket.error:
|
||||
return
|
||||
if self.verify_request(request, client_address):
|
||||
try:
|
||||
self.process_request(request, client_address)
|
||||
except:
|
||||
self.handle_error(request, client_address)
|
||||
self.shutdown_request(request)
|
||||
|
||||
def handle_timeout(self):
|
||||
"""Called if no new request arrives within self.timeout.
|
||||
|
||||
Overridden by ForkingMixIn.
|
||||
"""
|
||||
pass
|
||||
|
||||
def verify_request(self, request, client_address):
|
||||
"""Verify the request. May be overridden.
|
||||
|
||||
Return True if we should proceed with this request.
|
||||
|
||||
"""
|
||||
return True
|
||||
|
||||
def process_request(self, request, client_address):
|
||||
"""Call finish_request.
|
||||
|
||||
Overridden by ForkingMixIn and ThreadingMixIn.
|
||||
|
||||
"""
|
||||
self.finish_request(request, client_address)
|
||||
self.shutdown_request(request)
|
||||
|
||||
def server_close(self):
|
||||
"""Called to clean-up the server.
|
||||
|
||||
May be overridden.
|
||||
|
||||
"""
|
||||
pass
|
||||
|
||||
def finish_request(self, request, client_address):
|
||||
"""Finish one request by instantiating RequestHandlerClass."""
|
||||
self.RequestHandlerClass(request, client_address, self)
|
||||
|
||||
def shutdown_request(self, request):
|
||||
"""Called to shutdown and close an individual request."""
|
||||
self.close_request(request)
|
||||
|
||||
def close_request(self, request):
|
||||
"""Called to clean up an individual request."""
|
||||
pass
|
||||
|
||||
def handle_error(self, request, client_address):
|
||||
"""Handle an error gracefully. May be overridden.
|
||||
|
||||
The default is to print a traceback and continue.
|
||||
|
||||
"""
|
||||
print '-'*40
|
||||
print 'Exception happened during processing of request from',
|
||||
print client_address
|
||||
import traceback
|
||||
traceback.print_exc() # XXX But this goes to stderr!
|
||||
print '-'*40
|
||||
|
||||
|
||||
class TCPServer(BaseServer):
|
||||
|
||||
"""Base class for various socket-based server classes.
|
||||
|
||||
Defaults to synchronous IP stream (i.e., TCP).
|
||||
|
||||
Methods for the caller:
|
||||
|
||||
- __init__(server_address, RequestHandlerClass, bind_and_activate=True)
|
||||
- serve_forever(poll_interval=0.5)
|
||||
- shutdown()
|
||||
- handle_request() # if you don't use serve_forever()
|
||||
- fileno() -> int # for select()
|
||||
|
||||
Methods that may be overridden:
|
||||
|
||||
- server_bind()
|
||||
- server_activate()
|
||||
- get_request() -> request, client_address
|
||||
- handle_timeout()
|
||||
- verify_request(request, client_address)
|
||||
- process_request(request, client_address)
|
||||
- shutdown_request(request)
|
||||
- close_request(request)
|
||||
- handle_error()
|
||||
|
||||
Methods for derived classes:
|
||||
|
||||
- finish_request(request, client_address)
|
||||
|
||||
Class variables that may be overridden by derived classes or
|
||||
instances:
|
||||
|
||||
- timeout
|
||||
- address_family
|
||||
- socket_type
|
||||
- request_queue_size (only for stream sockets)
|
||||
- allow_reuse_address
|
||||
|
||||
Instance variables:
|
||||
|
||||
- server_address
|
||||
- RequestHandlerClass
|
||||
- socket
|
||||
|
||||
"""
|
||||
|
||||
address_family = socket.AF_INET
|
||||
|
||||
socket_type = socket.SOCK_STREAM
|
||||
|
||||
request_queue_size = 5
|
||||
|
||||
allow_reuse_address = False
|
||||
|
||||
def __init__(self, server_address, RequestHandlerClass, bind_and_activate=True):
|
||||
"""Constructor. May be extended, do not override."""
|
||||
BaseServer.__init__(self, server_address, RequestHandlerClass)
|
||||
self.socket = socket.socket(self.address_family,
|
||||
self.socket_type)
|
||||
if bind_and_activate:
|
||||
self.server_bind()
|
||||
self.server_activate()
|
||||
|
||||
def server_bind(self):
|
||||
"""Called by constructor to bind the socket.
|
||||
|
||||
May be overridden.
|
||||
|
||||
"""
|
||||
if self.allow_reuse_address:
|
||||
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
|
||||
self.socket.bind(self.server_address)
|
||||
self.server_address = self.socket.getsockname()
|
||||
|
||||
def server_activate(self):
|
||||
"""Called by constructor to activate the server.
|
||||
|
||||
May be overridden.
|
||||
|
||||
"""
|
||||
self.socket.listen(self.request_queue_size)
|
||||
|
||||
def server_close(self):
|
||||
"""Called to clean-up the server.
|
||||
|
||||
May be overridden.
|
||||
|
||||
"""
|
||||
self.socket.close()
|
||||
|
||||
def fileno(self):
|
||||
"""Return socket file number.
|
||||
|
||||
Interface required by select().
|
||||
|
||||
"""
|
||||
return self.socket.fileno()
|
||||
|
||||
def get_request(self):
|
||||
"""Get the request and client address from the socket.
|
||||
|
||||
May be overridden.
|
||||
|
||||
"""
|
||||
return self.socket.accept()
|
||||
|
||||
def shutdown_request(self, request):
|
||||
"""Called to shutdown and close an individual request."""
|
||||
try:
|
||||
#explicitly shutdown. socket.close() merely releases
|
||||
#the socket and waits for GC to perform the actual close.
|
||||
request.shutdown(socket.SHUT_WR)
|
||||
except socket.error:
|
||||
pass #some platforms may raise ENOTCONN here
|
||||
self.close_request(request)
|
||||
|
||||
def close_request(self, request):
|
||||
"""Called to clean up an individual request."""
|
||||
request.close()
|
||||
|
||||
|
||||
class UDPServer(TCPServer):
|
||||
|
||||
"""UDP server class."""
|
||||
|
||||
allow_reuse_address = False
|
||||
|
||||
socket_type = socket.SOCK_DGRAM
|
||||
|
||||
max_packet_size = 8192
|
||||
|
||||
def get_request(self):
|
||||
data, client_addr = self.socket.recvfrom(self.max_packet_size)
|
||||
return (data, self.socket), client_addr
|
||||
|
||||
def server_activate(self):
|
||||
# No need to call listen() for UDP.
|
||||
pass
|
||||
|
||||
def shutdown_request(self, request):
|
||||
# No need to shutdown anything.
|
||||
self.close_request(request)
|
||||
|
||||
def close_request(self, request):
|
||||
# No need to close anything.
|
||||
pass
|
||||
|
||||
class ForkingMixIn:
|
||||
|
||||
"""Mix-in class to handle each request in a new process."""
|
||||
|
||||
timeout = 300
|
||||
active_children = None
|
||||
max_children = 40
|
||||
|
||||
def collect_children(self):
|
||||
"""Internal routine to wait for children that have exited."""
|
||||
if self.active_children is None:
|
||||
return
|
||||
|
||||
# If we're above the max number of children, wait and reap them until
|
||||
# we go back below threshold. Note that we use waitpid(-1) below to be
|
||||
# able to collect children in size(<defunct children>) syscalls instead
|
||||
# of size(<children>): the downside is that this might reap children
|
||||
# which we didn't spawn, which is why we only resort to this when we're
|
||||
# above max_children.
|
||||
while len(self.active_children) >= self.max_children:
|
||||
try:
|
||||
pid, _ = os.waitpid(-1, 0)
|
||||
self.active_children.discard(pid)
|
||||
except OSError as e:
|
||||
if e.errno == errno.ECHILD:
|
||||
# we don't have any children, we're done
|
||||
self.active_children.clear()
|
||||
elif e.errno != errno.EINTR:
|
||||
break
|
||||
|
||||
# Now reap all defunct children.
|
||||
for pid in self.active_children.copy():
|
||||
try:
|
||||
pid, _ = os.waitpid(pid, os.WNOHANG)
|
||||
# if the child hasn't exited yet, pid will be 0 and ignored by
|
||||
# discard() below
|
||||
self.active_children.discard(pid)
|
||||
except OSError as e:
|
||||
if e.errno == errno.ECHILD:
|
||||
# someone else reaped it
|
||||
self.active_children.discard(pid)
|
||||
|
||||
def handle_timeout(self):
|
||||
"""Wait for zombies after self.timeout seconds of inactivity.
|
||||
|
||||
May be extended, do not override.
|
||||
"""
|
||||
self.collect_children()
|
||||
|
||||
def process_request(self, request, client_address):
|
||||
"""Fork a new subprocess to process the request."""
|
||||
self.collect_children()
|
||||
pid = os.fork()
|
||||
if pid:
|
||||
# Parent process
|
||||
if self.active_children is None:
|
||||
self.active_children = set()
|
||||
self.active_children.add(pid)
|
||||
self.close_request(request) #close handle in parent process
|
||||
return
|
||||
else:
|
||||
# Child process.
|
||||
# This must never return, hence os._exit()!
|
||||
try:
|
||||
self.finish_request(request, client_address)
|
||||
self.shutdown_request(request)
|
||||
os._exit(0)
|
||||
except:
|
||||
try:
|
||||
self.handle_error(request, client_address)
|
||||
self.shutdown_request(request)
|
||||
finally:
|
||||
os._exit(1)
|
||||
|
||||
|
||||
class ThreadingMixIn:
|
||||
"""Mix-in class to handle each request in a new thread."""
|
||||
|
||||
# Decides how threads will act upon termination of the
|
||||
# main process
|
||||
daemon_threads = False
|
||||
|
||||
def process_request_thread(self, request, client_address):
|
||||
"""Same as in BaseServer but as a thread.
|
||||
|
||||
In addition, exception handling is done here.
|
||||
|
||||
"""
|
||||
try:
|
||||
self.finish_request(request, client_address)
|
||||
self.shutdown_request(request)
|
||||
except:
|
||||
self.handle_error(request, client_address)
|
||||
self.shutdown_request(request)
|
||||
|
||||
def process_request(self, request, client_address):
|
||||
"""Start a new thread to process the request."""
|
||||
t = threading.Thread(target = self.process_request_thread,
|
||||
args = (request, client_address))
|
||||
t.daemon = self.daemon_threads
|
||||
t.start()
|
||||
|
||||
|
||||
class ForkingUDPServer(ForkingMixIn, UDPServer): pass
|
||||
class ForkingTCPServer(ForkingMixIn, TCPServer): pass
|
||||
|
||||
class ThreadingUDPServer(ThreadingMixIn, UDPServer): pass
|
||||
class ThreadingTCPServer(ThreadingMixIn, TCPServer): pass
|
||||
|
||||
if hasattr(socket, 'AF_UNIX'):
|
||||
|
||||
class UnixStreamServer(TCPServer):
|
||||
address_family = socket.AF_UNIX
|
||||
|
||||
class UnixDatagramServer(UDPServer):
|
||||
address_family = socket.AF_UNIX
|
||||
|
||||
class ThreadingUnixStreamServer(ThreadingMixIn, UnixStreamServer): pass
|
||||
|
||||
class ThreadingUnixDatagramServer(ThreadingMixIn, UnixDatagramServer): pass
|
||||
|
||||
class BaseRequestHandler:
|
||||
|
||||
"""Base class for request handler classes.
|
||||
|
||||
This class is instantiated for each request to be handled. The
|
||||
constructor sets the instance variables request, client_address
|
||||
and server, and then calls the handle() method. To implement a
|
||||
specific service, all you need to do is to derive a class which
|
||||
defines a handle() method.
|
||||
|
||||
The handle() method can find the request as self.request, the
|
||||
client address as self.client_address, and the server (in case it
|
||||
needs access to per-server information) as self.server. Since a
|
||||
separate instance is created for each request, the handle() method
|
||||
can define arbitrary other instance variariables.
|
||||
|
||||
"""
|
||||
|
||||
def __init__(self, request, client_address, server):
|
||||
self.request = request
|
||||
self.client_address = client_address
|
||||
self.server = server
|
||||
self.setup()
|
||||
try:
|
||||
self.handle()
|
||||
finally:
|
||||
self.finish()
|
||||
|
||||
def setup(self):
|
||||
pass
|
||||
|
||||
def handle(self):
|
||||
pass
|
||||
|
||||
def finish(self):
|
||||
pass
|
||||
|
||||
|
||||
# The following two classes make it possible to use the same service
|
||||
# class for stream or datagram servers.
|
||||
# Each class sets up these instance variables:
|
||||
# - rfile: a file object from which receives the request is read
|
||||
# - wfile: a file object to which the reply is written
|
||||
# When the handle() method returns, wfile is flushed properly
|
||||
|
||||
|
||||
class StreamRequestHandler(BaseRequestHandler):
|
||||
|
||||
"""Define self.rfile and self.wfile for stream sockets."""
|
||||
|
||||
# Default buffer sizes for rfile, wfile.
|
||||
# We default rfile to buffered because otherwise it could be
|
||||
# really slow for large data (a getc() call per byte); we make
|
||||
# wfile unbuffered because (a) often after a write() we want to
|
||||
# read and we need to flush the line; (b) big writes to unbuffered
|
||||
# files are typically optimized by stdio even when big reads
|
||||
# aren't.
|
||||
rbufsize = -1
|
||||
wbufsize = 0
|
||||
|
||||
# A timeout to apply to the request socket, if not None.
|
||||
timeout = None
|
||||
|
||||
# Disable nagle algorithm for this socket, if True.
|
||||
# Use only when wbufsize != 0, to avoid small packets.
|
||||
disable_nagle_algorithm = False
|
||||
|
||||
def setup(self):
|
||||
self.connection = self.request
|
||||
if self.timeout is not None:
|
||||
self.connection.settimeout(self.timeout)
|
||||
if self.disable_nagle_algorithm:
|
||||
self.connection.setsockopt(socket.IPPROTO_TCP,
|
||||
socket.TCP_NODELAY, True)
|
||||
self.rfile = self.connection.makefile('rb', self.rbufsize)
|
||||
self.wfile = self.connection.makefile('wb', self.wbufsize)
|
||||
|
||||
def finish(self):
|
||||
if not self.wfile.closed:
|
||||
try:
|
||||
self.wfile.flush()
|
||||
except socket.error:
|
||||
# An final socket error may have occurred here, such as
|
||||
# the local error ECONNABORTED.
|
||||
pass
|
||||
self.wfile.close()
|
||||
self.rfile.close()
|
||||
|
||||
|
||||
class DatagramRequestHandler(BaseRequestHandler):
|
||||
|
||||
# XXX Regrettably, I cannot get this working on Linux;
|
||||
# s.recvfrom() doesn't return a meaningful client address.
|
||||
|
||||
"""Define self.rfile and self.wfile for datagram sockets."""
|
||||
|
||||
def setup(self):
|
||||
try:
|
||||
from cStringIO import StringIO
|
||||
except ImportError:
|
||||
from StringIO import StringIO
|
||||
self.packet, self.socket = self.request
|
||||
self.rfile = StringIO(self.packet)
|
||||
self.wfile = StringIO()
|
||||
|
||||
def finish(self):
|
||||
self.socket.sendto(self.wfile.getvalue(), self.client_address)
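

if __name__ == "__main__":
    # Usage sketch (illustrative, not part of the original module): run
    # serve_forever() in a background thread and stop it with shutdown(),
    # which must be called from another thread (see its docstring above).
    class _NullHandler(BaseRequestHandler):
        def handle(self):
            pass

    _srv = TCPServer(("localhost", 0), _NullHandler)  # port 0: any free port
    _t = threading.Thread(target=_srv.serve_forever)
    _t.start()
    _srv.shutdown()      # blocks until the serve_forever loop has exited
    _t.join()
    _srv.server_close()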

324
PythonHome/Lib/StringIO.py
Normal file
@@ -0,0 +1,324 @@

r"""File-like objects that read from or write to a string buffer.
|
||||
|
||||
This implements (nearly) all stdio methods.
|
||||
|
||||
f = StringIO() # ready for writing
|
||||
f = StringIO(buf) # ready for reading
|
||||
f.close() # explicitly release resources held
|
||||
flag = f.isatty() # always false
|
||||
pos = f.tell() # get current position
|
||||
f.seek(pos) # set current position
|
||||
f.seek(pos, mode) # mode 0: absolute; 1: relative; 2: relative to EOF
|
||||
buf = f.read() # read until EOF
|
||||
buf = f.read(n) # read up to n bytes
|
||||
buf = f.readline() # read until end of line ('\n') or EOF
|
||||
list = f.readlines()# list of f.readline() results until EOF
|
||||
f.truncate([size]) # truncate file at to at most size (default: current pos)
|
||||
f.write(buf) # write at current position
|
||||
f.writelines(list) # for line in list: f.write(line)
|
||||
f.getvalue() # return whole file's contents as a string
|
||||
|
||||
Notes:
|
||||
- Using a real file is often faster (but less convenient).
|
||||
- There's also a much faster implementation in C, called cStringIO, but
|
||||
it's not subclassable.
|
||||
- fileno() is left unimplemented so that code which uses it triggers
|
||||
an exception early.
|
||||
- Seeking far beyond EOF and then writing will insert real null
|
||||
bytes that occupy space in the buffer.
|
||||
- There's a simple test set (see end of this file).
|
||||
"""
|
||||
try:
|
||||
from errno import EINVAL
|
||||
except ImportError:
|
||||
EINVAL = 22
|
||||
|
||||
__all__ = ["StringIO"]
|
||||
|
||||
def _complain_ifclosed(closed):
|
||||
if closed:
|
||||
raise ValueError, "I/O operation on closed file"
|
||||
|
||||
class StringIO:
|
||||
"""class StringIO([buffer])
|
||||
|
||||
When a StringIO object is created, it can be initialized to an existing
|
||||
string by passing the string to the constructor. If no string is given,
|
||||
the StringIO will start empty.
|
||||
|
||||
The StringIO object can accept either Unicode or 8-bit strings, but
|
||||
mixing the two may take some care. If both are used, 8-bit strings that
|
||||
cannot be interpreted as 7-bit ASCII (that use the 8th bit) will cause
|
||||
a UnicodeError to be raised when getvalue() is called.
|
||||
"""
|
||||
def __init__(self, buf = ''):
|
||||
# Force self.buf to be a string or unicode
|
||||
if not isinstance(buf, basestring):
|
||||
buf = str(buf)
|
||||
self.buf = buf
|
||||
self.len = len(buf)
|
||||
self.buflist = []
|
||||
self.pos = 0
|
||||
self.closed = False
|
||||
self.softspace = 0
|
||||
|
||||
def __iter__(self):
|
||||
return self
|
||||
|
||||
def next(self):
|
||||
"""A file object is its own iterator, for example iter(f) returns f
|
||||
(unless f is closed). When a file is used as an iterator, typically
|
||||
in a for loop (for example, for line in f: print line), the next()
|
||||
method is called repeatedly. This method returns the next input line,
|
||||
or raises StopIteration when EOF is hit.
|
||||
"""
|
||||
_complain_ifclosed(self.closed)
|
||||
r = self.readline()
|
||||
if not r:
|
||||
raise StopIteration
|
||||
return r
|
||||
|
||||
def close(self):
|
||||
"""Free the memory buffer.
|
||||
"""
|
||||
if not self.closed:
|
||||
self.closed = True
|
||||
del self.buf, self.pos
|
||||
|
||||
def isatty(self):
|
||||
"""Returns False because StringIO objects are not connected to a
|
||||
tty-like device.
|
||||
"""
|
||||
_complain_ifclosed(self.closed)
|
||||
return False
|
||||
|
||||
def seek(self, pos, mode = 0):
|
||||
"""Set the file's current position.
|
||||
|
||||
The mode argument is optional and defaults to 0 (absolute file
|
||||
positioning); other values are 1 (seek relative to the current
|
||||
position) and 2 (seek relative to the file's end).
|
||||
|
||||
There is no return value.
|
||||
"""
|
||||
_complain_ifclosed(self.closed)
|
||||
if self.buflist:
|
||||
self.buf += ''.join(self.buflist)
|
||||
self.buflist = []
|
||||
if mode == 1:
|
||||
pos += self.pos
|
||||
elif mode == 2:
|
||||
pos += self.len
|
||||
self.pos = max(0, pos)
|
||||
|
||||
def tell(self):
|
||||
"""Return the file's current position."""
|
||||
_complain_ifclosed(self.closed)
|
||||
return self.pos
|
||||
|
||||
def read(self, n = -1):
|
||||
"""Read at most size bytes from the file
|
||||
(less if the read hits EOF before obtaining size bytes).
|
||||
|
||||
If the size argument is negative or omitted, read all data until EOF
|
||||
is reached. The bytes are returned as a string object. An empty
|
||||
string is returned when EOF is encountered immediately.
|
||||
"""
|
||||
_complain_ifclosed(self.closed)
|
||||
if self.buflist:
|
||||
self.buf += ''.join(self.buflist)
|
||||
self.buflist = []
|
||||
if n is None or n < 0:
|
||||
newpos = self.len
|
||||
else:
|
||||
newpos = min(self.pos+n, self.len)
|
||||
r = self.buf[self.pos:newpos]
|
||||
self.pos = newpos
|
||||
return r
|
||||
|
||||
def readline(self, length=None):
|
||||
r"""Read one entire line from the file.
|
||||
|
||||
A trailing newline character is kept in the string (but may be absent
|
||||
when a file ends with an incomplete line). If the size argument is
|
||||
present and non-negative, it is a maximum byte count (including the
|
||||
trailing newline) and an incomplete line may be returned.
|
||||
|
||||
An empty string is returned only when EOF is encountered immediately.
|
||||
|
||||
Note: Unlike stdio's fgets(), the returned string contains null
|
||||
characters ('\0') if they occurred in the input.
|
||||
"""
|
||||
_complain_ifclosed(self.closed)
|
||||
if self.buflist:
|
||||
self.buf += ''.join(self.buflist)
|
||||
self.buflist = []
|
||||
i = self.buf.find('\n', self.pos)
|
||||
if i < 0:
|
||||
newpos = self.len
|
||||
else:
|
||||
newpos = i+1
|
||||
if length is not None and length >= 0:
|
||||
if self.pos + length < newpos:
|
||||
newpos = self.pos + length
|
||||
r = self.buf[self.pos:newpos]
|
||||
self.pos = newpos
|
||||
return r
|
||||
|
||||
def readlines(self, sizehint = 0):
|
||||
"""Read until EOF using readline() and return a list containing the
|
||||
lines thus read.
|
||||
|
||||
If the optional sizehint argument is present, instead of reading up
|
||||
to EOF, whole lines totalling approximately sizehint bytes (or more
|
||||
to accommodate a final whole line).
|
||||
"""
|
||||
total = 0
|
||||
lines = []
|
||||
line = self.readline()
|
||||
while line:
|
||||
lines.append(line)
|
||||
total += len(line)
|
||||
if 0 < sizehint <= total:
|
||||
break
|
||||
line = self.readline()
|
||||
return lines
|
||||
|
||||
def truncate(self, size=None):
|
||||
"""Truncate the file's size.
|
||||
|
||||
If the optional size argument is present, the file is truncated to
|
||||
(at most) that size. The size defaults to the current position.
|
||||
The current file position is not changed unless the position
|
||||
is beyond the new file size.
|
||||
|
||||
If the specified size exceeds the file's current size, the
|
||||
file remains unchanged.
|
||||
"""
|
||||
_complain_ifclosed(self.closed)
|
||||
if size is None:
|
||||
size = self.pos
|
||||
elif size < 0:
|
||||
raise IOError(EINVAL, "Negative size not allowed")
|
||||
elif size < self.pos:
|
||||
self.pos = size
|
||||
self.buf = self.getvalue()[:size]
|
||||
self.len = size
|
||||
|
||||
def write(self, s):
|
||||
"""Write a string to the file.
|
||||
|
||||
There is no return value.
|
||||
"""
|
||||
_complain_ifclosed(self.closed)
|
||||
if not s: return
|
||||
# Force s to be a string or unicode
|
||||
if not isinstance(s, basestring):
|
||||
s = str(s)
|
||||
spos = self.pos
|
||||
slen = self.len
|
||||
if spos == slen:
|
||||
self.buflist.append(s)
|
||||
self.len = self.pos = spos + len(s)
|
||||
return
|
||||
if spos > slen:
|
||||
self.buflist.append('\0'*(spos - slen))
|
||||
slen = spos
|
||||
newpos = spos + len(s)
|
||||
if spos < slen:
|
||||
if self.buflist:
|
||||
self.buf += ''.join(self.buflist)
|
||||
self.buflist = [self.buf[:spos], s, self.buf[newpos:]]
|
||||
self.buf = ''
|
||||
if newpos > slen:
|
||||
slen = newpos
|
||||
else:
|
||||
self.buflist.append(s)
|
||||
slen = newpos
|
||||
self.len = slen
|
||||
self.pos = newpos
|
||||
|
||||
def writelines(self, iterable):
|
||||
"""Write a sequence of strings to the file. The sequence can be any
|
||||
iterable object producing strings, typically a list of strings. There
|
||||
is no return value.
|
||||
|
||||
(The name is intended to match readlines(); writelines() does not add
|
||||
line separators.)
|
||||
"""
|
||||
write = self.write
|
||||
for line in iterable:
|
||||
write(line)
|
||||
|
||||
def flush(self):
|
||||
"""Flush the internal buffer
|
||||
"""
|
||||
_complain_ifclosed(self.closed)
|
||||
|
||||
def getvalue(self):
|
||||
"""
|
||||
Retrieve the entire contents of the "file" at any time before
|
||||
the StringIO object's close() method is called.
|
||||
|
||||
The StringIO object can accept either Unicode or 8-bit strings,
|
||||
but mixing the two may take some care. If both are used, 8-bit
|
||||
strings that cannot be interpreted as 7-bit ASCII (that use the
|
||||
8th bit) will cause a UnicodeError to be raised when getvalue()
|
||||
is called.
|
||||
"""
|
||||
_complain_ifclosed(self.closed)
|
||||
if self.buflist:
|
||||
self.buf += ''.join(self.buflist)
|
||||
self.buflist = []
|
||||
return self.buf
|
||||
|
||||
|
||||
# A little test suite
|
||||
|
||||
def test():
|
||||
import sys
|
||||
if sys.argv[1:]:
|
||||
file = sys.argv[1]
|
||||
else:
|
||||
file = '/etc/passwd'
|
||||
lines = open(file, 'r').readlines()
|
||||
text = open(file, 'r').read()
|
||||
f = StringIO()
|
||||
for line in lines[:-2]:
|
||||
f.write(line)
|
||||
f.writelines(lines[-2:])
|
||||
if f.getvalue() != text:
|
||||
raise RuntimeError, 'write failed'
|
||||
length = f.tell()
|
||||
print 'File length =', length
|
||||
f.seek(len(lines[0]))
|
||||
f.write(lines[1])
|
||||
f.seek(0)
|
||||
print 'First line =', repr(f.readline())
|
||||
print 'Position =', f.tell()
|
||||
line = f.readline()
|
||||
print 'Second line =', repr(line)
|
||||
f.seek(-len(line), 1)
|
||||
line2 = f.read(len(line))
|
||||
if line != line2:
|
||||
raise RuntimeError, 'bad result after seek back'
|
||||
f.seek(len(line2), 1)
|
||||
list = f.readlines()
|
||||
line = list[-1]
|
||||
f.seek(f.tell() - len(line))
|
||||
line2 = f.read()
|
||||
if line != line2:
|
||||
raise RuntimeError, 'bad result after seek back from EOF'
|
||||
print 'Read', len(list), 'more lines'
|
||||
print 'File length =', f.tell()
|
||||
if f.tell() != length:
|
||||
raise RuntimeError, 'bad length'
|
||||
f.truncate(length/2)
|
||||
f.seek(0, 2)
|
||||
print 'Truncated length =', f.tell()
|
||||
if f.tell() != length/2:
|
||||
raise RuntimeError, 'truncate did not adjust length'
|
||||
f.close()
|
||||
|
||||
if __name__ == '__main__':
|
||||
test()
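
# Example (illustrative, not part of the module): the buffer semantics
# described in the docstring, including null-byte padding when writing
# past EOF:
#
#     f = StringIO()
#     f.write('abc')
#     f.seek(5)
#     f.write('xy')                        # pads with '\0\0' up to pos 5
#     assert f.getvalue() == 'abc\x00\x00xy'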

180
PythonHome/Lib/UserDict.py
Normal file
@@ -0,0 +1,180 @@

"""A more or less complete user-defined wrapper around dictionary objects."""
|
||||
|
||||
class UserDict:
|
||||
def __init__(self, dict=None, **kwargs):
|
||||
self.data = {}
|
||||
if dict is not None:
|
||||
self.update(dict)
|
||||
if len(kwargs):
|
||||
self.update(kwargs)
|
||||
def __repr__(self): return repr(self.data)
|
||||
def __cmp__(self, dict):
|
||||
if isinstance(dict, UserDict):
|
||||
return cmp(self.data, dict.data)
|
||||
else:
|
||||
return cmp(self.data, dict)
|
||||
__hash__ = None # Avoid Py3k warning
|
||||
def __len__(self): return len(self.data)
|
||||
def __getitem__(self, key):
|
||||
if key in self.data:
|
||||
return self.data[key]
|
||||
if hasattr(self.__class__, "__missing__"):
|
||||
return self.__class__.__missing__(self, key)
|
||||
raise KeyError(key)
|
||||
def __setitem__(self, key, item): self.data[key] = item
|
||||
def __delitem__(self, key): del self.data[key]
|
||||
def clear(self): self.data.clear()
|
||||
def copy(self):
|
||||
if self.__class__ is UserDict:
|
||||
return UserDict(self.data.copy())
|
||||
import copy
|
||||
data = self.data
|
||||
try:
|
||||
self.data = {}
|
||||
c = copy.copy(self)
|
||||
finally:
|
||||
self.data = data
|
||||
c.update(self)
|
||||
return c
|
||||
def keys(self): return self.data.keys()
|
||||
def items(self): return self.data.items()
|
||||
def iteritems(self): return self.data.iteritems()
|
||||
def iterkeys(self): return self.data.iterkeys()
|
||||
def itervalues(self): return self.data.itervalues()
|
||||
def values(self): return self.data.values()
|
||||
def has_key(self, key): return key in self.data
|
||||
def update(self, dict=None, **kwargs):
|
||||
if dict is None:
|
||||
pass
|
||||
elif isinstance(dict, UserDict):
|
||||
self.data.update(dict.data)
|
||||
elif isinstance(dict, type({})) or not hasattr(dict, 'items'):
|
||||
self.data.update(dict)
|
||||
else:
|
||||
for k, v in dict.items():
|
||||
self[k] = v
|
||||
if len(kwargs):
|
||||
self.data.update(kwargs)
|
||||
def get(self, key, failobj=None):
|
||||
if key not in self:
|
||||
return failobj
|
||||
return self[key]
|
||||
def setdefault(self, key, failobj=None):
|
||||
if key not in self:
|
||||
self[key] = failobj
|
||||
return self[key]
|
||||
def pop(self, key, *args):
|
||||
return self.data.pop(key, *args)
|
||||
def popitem(self):
|
||||
return self.data.popitem()
|
||||
def __contains__(self, key):
|
||||
return key in self.data
|
||||
@classmethod
|
||||
def fromkeys(cls, iterable, value=None):
|
||||
d = cls()
|
||||
for key in iterable:
|
||||
d[key] = value
|
||||
return d
|
||||
|
||||
class IterableUserDict(UserDict):
|
||||
def __iter__(self):
|
||||
return iter(self.data)
|
||||
|
||||
import _abcoll
|
||||
_abcoll.MutableMapping.register(IterableUserDict)
|
||||
|
||||
|
||||
class DictMixin:
|
||||
# Mixin defining all dictionary methods for classes that already have
|
||||
# a minimum dictionary interface including getitem, setitem, delitem,
|
||||
# and keys. Without knowledge of the subclass constructor, the mixin
|
||||
# does not define __init__() or copy(). In addition to the four base
|
||||
# methods, progressively more efficiency comes with defining
|
||||
# __contains__(), __iter__(), and iteritems().
|
||||
|
||||
# second level definitions support higher levels
|
||||
def __iter__(self):
|
||||
for k in self.keys():
|
||||
yield k
|
||||
def has_key(self, key):
|
||||
try:
|
||||
self[key]
|
||||
except KeyError:
|
||||
return False
|
||||
return True
|
||||
def __contains__(self, key):
|
||||
return self.has_key(key)
|
||||
|
||||
# third level takes advantage of second level definitions
|
||||
def iteritems(self):
|
||||
for k in self:
|
||||
yield (k, self[k])
|
||||
def iterkeys(self):
|
||||
return self.__iter__()
|
||||
|
||||
# fourth level uses definitions from lower levels
|
||||
def itervalues(self):
|
||||
for _, v in self.iteritems():
|
||||
yield v
|
||||
def values(self):
|
||||
return [v for _, v in self.iteritems()]
|
||||
def items(self):
|
||||
return list(self.iteritems())
|
||||
def clear(self):
|
||||
for key in self.keys():
|
||||
del self[key]
|
||||
def setdefault(self, key, default=None):
|
||||
try:
|
||||
return self[key]
|
||||
except KeyError:
|
||||
self[key] = default
|
||||
return default
|
||||
def pop(self, key, *args):
|
||||
if len(args) > 1:
|
||||
raise TypeError, "pop expected at most 2 arguments, got "\
|
||||
+ repr(1 + len(args))
|
||||
try:
|
||||
value = self[key]
|
||||
except KeyError:
|
||||
if args:
|
||||
return args[0]
|
||||
raise
|
||||
del self[key]
|
||||
return value
|
||||
def popitem(self):
|
||||
try:
|
||||
k, v = self.iteritems().next()
|
||||
except StopIteration:
|
||||
raise KeyError, 'container is empty'
|
||||
del self[k]
|
||||
return (k, v)
|
||||
def update(self, other=None, **kwargs):
|
||||
# Make progressively weaker assumptions about "other"
|
||||
if other is None:
|
||||
pass
|
||||
elif hasattr(other, 'iteritems'): # iteritems saves memory and lookups
|
||||
for k, v in other.iteritems():
|
||||
self[k] = v
|
||||
elif hasattr(other, 'keys'):
|
||||
for k in other.keys():
|
||||
self[k] = other[k]
|
||||
else:
|
||||
for k, v in other:
|
||||
self[k] = v
|
||||
if kwargs:
|
||||
self.update(kwargs)
|
||||
def get(self, key, default=None):
|
||||
try:
|
||||
return self[key]
|
||||
except KeyError:
|
||||
return default
|
||||
def __repr__(self):
|
||||
return repr(dict(self.iteritems()))
|
||||
def __cmp__(self, other):
|
||||
if other is None:
|
||||
return 1
|
||||
if isinstance(other, DictMixin):
|
||||
other = dict(other.iteritems())
|
||||
return cmp(dict(self.iteritems()), other)
|
||||
def __len__(self):
|
||||
return len(self.keys())
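

if __name__ == "__main__":
    # Example (illustrative sketch, not part of the module): the four base
    # methods -- __getitem__, __setitem__, __delitem__ and keys() -- are
    # enough for DictMixin to supply the rest of the mapping interface.
    class SimpleMap(DictMixin):
        def __init__(self):
            self._store = {}
        def __getitem__(self, key):
            return self._store[key]
        def __setitem__(self, key, value):
            self._store[key] = value
        def __delitem__(self, key):
            del self._store[key]
        def keys(self):
            return self._store.keys()

    m = SimpleMap()
    m["a"] = 1
    print m.get("a"), m.items(), "a" in m   # 1 [('a', 1)] True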

88
PythonHome/Lib/UserList.py
Normal file
@@ -0,0 +1,88 @@

"""A more or less complete user-defined wrapper around list objects."""
|
||||
|
||||
import collections
|
||||
|
||||
class UserList(collections.MutableSequence):
|
||||
def __init__(self, initlist=None):
|
||||
self.data = []
|
||||
if initlist is not None:
|
||||
# XXX should this accept an arbitrary sequence?
|
||||
if type(initlist) == type(self.data):
|
||||
self.data[:] = initlist
|
||||
elif isinstance(initlist, UserList):
|
||||
self.data[:] = initlist.data[:]
|
||||
else:
|
||||
self.data = list(initlist)
|
||||
def __repr__(self): return repr(self.data)
|
||||
def __lt__(self, other): return self.data < self.__cast(other)
|
||||
def __le__(self, other): return self.data <= self.__cast(other)
|
||||
def __eq__(self, other): return self.data == self.__cast(other)
|
||||
def __ne__(self, other): return self.data != self.__cast(other)
|
||||
def __gt__(self, other): return self.data > self.__cast(other)
|
||||
def __ge__(self, other): return self.data >= self.__cast(other)
|
||||
def __cast(self, other):
|
||||
if isinstance(other, UserList): return other.data
|
||||
else: return other
|
||||
def __cmp__(self, other):
|
||||
return cmp(self.data, self.__cast(other))
|
||||
__hash__ = None # Mutable sequence, so not hashable
|
||||
def __contains__(self, item): return item in self.data
|
||||
def __len__(self): return len(self.data)
|
||||
def __getitem__(self, i): return self.data[i]
|
||||
def __setitem__(self, i, item): self.data[i] = item
|
||||
def __delitem__(self, i): del self.data[i]
|
||||
def __getslice__(self, i, j):
|
||||
i = max(i, 0); j = max(j, 0)
|
||||
return self.__class__(self.data[i:j])
|
||||
def __setslice__(self, i, j, other):
|
||||
i = max(i, 0); j = max(j, 0)
|
||||
if isinstance(other, UserList):
|
||||
self.data[i:j] = other.data
|
||||
elif isinstance(other, type(self.data)):
|
||||
self.data[i:j] = other
|
||||
else:
|
||||
self.data[i:j] = list(other)
|
||||
def __delslice__(self, i, j):
|
||||
i = max(i, 0); j = max(j, 0)
|
||||
del self.data[i:j]
|
||||
def __add__(self, other):
|
||||
if isinstance(other, UserList):
|
||||
return self.__class__(self.data + other.data)
|
||||
elif isinstance(other, type(self.data)):
|
||||
return self.__class__(self.data + other)
|
||||
else:
|
||||
return self.__class__(self.data + list(other))
|
||||
def __radd__(self, other):
|
||||
if isinstance(other, UserList):
|
||||
return self.__class__(other.data + self.data)
|
||||
elif isinstance(other, type(self.data)):
|
||||
return self.__class__(other + self.data)
|
||||
else:
|
||||
return self.__class__(list(other) + self.data)
|
||||
def __iadd__(self, other):
|
||||
if isinstance(other, UserList):
|
||||
self.data += other.data
|
||||
elif isinstance(other, type(self.data)):
|
||||
self.data += other
|
||||
else:
|
||||
self.data += list(other)
|
||||
return self
|
||||
def __mul__(self, n):
|
||||
return self.__class__(self.data*n)
|
||||
__rmul__ = __mul__
|
||||
def __imul__(self, n):
|
||||
self.data *= n
|
||||
return self
|
||||
def append(self, item): self.data.append(item)
|
||||
def insert(self, i, item): self.data.insert(i, item)
|
||||
def pop(self, i=-1): return self.data.pop(i)
|
||||
def remove(self, item): self.data.remove(item)
|
||||
def count(self, item): return self.data.count(item)
|
||||
def index(self, item, *args): return self.data.index(item, *args)
|
||||
def reverse(self): self.data.reverse()
|
||||
def sort(self, *args, **kwds): self.data.sort(*args, **kwds)
|
||||
def extend(self, other):
|
||||
if isinstance(other, UserList):
|
||||
self.data.extend(other.data)
|
||||
else:
|
||||
self.data.extend(other)
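

if __name__ == "__main__":
    # Example (illustrative sketch, not part of the module): UserList keeps
    # its contents in self.data, so subclasses can hook mutation methods.
    class LoggingList(UserList):
        def append(self, item):
            print "appending", item
            UserList.append(self, item)

    ll = LoggingList([1, 2])
    ll.append(3)        # prints: appending 3
    print ll, len(ll)   # [1, 2, 3] 3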

228
PythonHome/Lib/UserString.py
Normal file
@@ -0,0 +1,228 @@

#!/usr/bin/env python
## vim:ts=4:et:nowrap
"""A user-defined wrapper around string objects

Note: string objects have grown methods in Python 1.6
This module requires Python 1.6 or later.
"""
import sys
import collections

__all__ = ["UserString","MutableString"]

class UserString(collections.Sequence):
    def __init__(self, seq):
        if isinstance(seq, basestring):
            self.data = seq
        elif isinstance(seq, UserString):
            self.data = seq.data[:]
        else:
            self.data = str(seq)
    def __str__(self): return str(self.data)
    def __repr__(self): return repr(self.data)
    def __int__(self): return int(self.data)
    def __long__(self): return long(self.data)
    def __float__(self): return float(self.data)
    def __complex__(self): return complex(self.data)
    def __hash__(self): return hash(self.data)

    def __cmp__(self, string):
        if isinstance(string, UserString):
            return cmp(self.data, string.data)
        else:
            return cmp(self.data, string)
    def __contains__(self, char):
        return char in self.data

    def __len__(self): return len(self.data)
    def __getitem__(self, index): return self.__class__(self.data[index])
    def __getslice__(self, start, end):
        start = max(start, 0); end = max(end, 0)
        return self.__class__(self.data[start:end])

    def __add__(self, other):
        if isinstance(other, UserString):
            return self.__class__(self.data + other.data)
        elif isinstance(other, basestring):
            return self.__class__(self.data + other)
        else:
            return self.__class__(self.data + str(other))
    def __radd__(self, other):
        if isinstance(other, basestring):
            return self.__class__(other + self.data)
        else:
            return self.__class__(str(other) + self.data)
    def __mul__(self, n):
        return self.__class__(self.data*n)
    __rmul__ = __mul__
    def __mod__(self, args):
        return self.__class__(self.data % args)

    # the following methods are defined in alphabetical order:
    def capitalize(self): return self.__class__(self.data.capitalize())
    def center(self, width, *args):
        return self.__class__(self.data.center(width, *args))
    def count(self, sub, start=0, end=sys.maxint):
        return self.data.count(sub, start, end)
    def decode(self, encoding=None, errors=None): # XXX improve this?
        if encoding:
            if errors:
                return self.__class__(self.data.decode(encoding, errors))
            else:
                return self.__class__(self.data.decode(encoding))
        else:
            return self.__class__(self.data.decode())
    def encode(self, encoding=None, errors=None): # XXX improve this?
        if encoding:
            if errors:
                return self.__class__(self.data.encode(encoding, errors))
            else:
                return self.__class__(self.data.encode(encoding))
        else:
            return self.__class__(self.data.encode())
    def endswith(self, suffix, start=0, end=sys.maxint):
        return self.data.endswith(suffix, start, end)
    def expandtabs(self, tabsize=8):
        return self.__class__(self.data.expandtabs(tabsize))
    def find(self, sub, start=0, end=sys.maxint):
        return self.data.find(sub, start, end)
    def index(self, sub, start=0, end=sys.maxint):
        return self.data.index(sub, start, end)
    def isalpha(self): return self.data.isalpha()
    def isalnum(self): return self.data.isalnum()
    def isdecimal(self): return self.data.isdecimal()
    def isdigit(self): return self.data.isdigit()
    def islower(self): return self.data.islower()
    def isnumeric(self): return self.data.isnumeric()
    def isspace(self): return self.data.isspace()
    def istitle(self): return self.data.istitle()
    def isupper(self): return self.data.isupper()
    def join(self, seq): return self.data.join(seq)
    def ljust(self, width, *args):
        return self.__class__(self.data.ljust(width, *args))
    def lower(self): return self.__class__(self.data.lower())
    def lstrip(self, chars=None): return self.__class__(self.data.lstrip(chars))
    def partition(self, sep):
        return self.data.partition(sep)
    def replace(self, old, new, maxsplit=-1):
        return self.__class__(self.data.replace(old, new, maxsplit))
    def rfind(self, sub, start=0, end=sys.maxint):
        return self.data.rfind(sub, start, end)
    def rindex(self, sub, start=0, end=sys.maxint):
        return self.data.rindex(sub, start, end)
    def rjust(self, width, *args):
        return self.__class__(self.data.rjust(width, *args))
    def rpartition(self, sep):
        return self.data.rpartition(sep)
    def rstrip(self, chars=None): return self.__class__(self.data.rstrip(chars))
    def split(self, sep=None, maxsplit=-1):
        return self.data.split(sep, maxsplit)
    def rsplit(self, sep=None, maxsplit=-1):
        return self.data.rsplit(sep, maxsplit)
    def splitlines(self, keepends=0): return self.data.splitlines(keepends)
    def startswith(self, prefix, start=0, end=sys.maxint):
        return self.data.startswith(prefix, start, end)
    def strip(self, chars=None): return self.__class__(self.data.strip(chars))
    def swapcase(self): return self.__class__(self.data.swapcase())
    def title(self): return self.__class__(self.data.title())
    def translate(self, *args):
        return self.__class__(self.data.translate(*args))
    def upper(self): return self.__class__(self.data.upper())
    def zfill(self, width): return self.__class__(self.data.zfill(width))
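
# Example (illustrative, not part of the module): because the methods above
# wrap their results in self.__class__, subclasses keep their type across
# string operations.
#
#     class CSV(UserString):
#         def fields(self):
#             return self.data.split(',')
#
#     row = CSV('a,b').upper()    # still a CSV instance
#     print row.fields()          # ['A', 'B']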

class MutableString(UserString, collections.MutableSequence):
    """mutable string objects

    Python strings are immutable objects. This has the advantage, that
    strings may be used as dictionary keys. If this property isn't needed
    and you insist on changing string values in place instead, you may cheat
    and use MutableString.

    But the purpose of this class is an educational one: to prevent
    people from inventing their own mutable string class derived
    from UserString and then thereby forgetting to remove (override) the
    __hash__ method inherited from UserString. This would lead to
    errors that would be very hard to track down.

    A faster and better solution is to rewrite your program using lists."""
    def __init__(self, string=""):
        from warnings import warnpy3k
        warnpy3k('the class UserString.MutableString has been removed in '
                 'Python 3.0', stacklevel=2)
        self.data = string

    # We inherit object.__hash__, so we must deny this explicitly
    __hash__ = None

    def __setitem__(self, index, sub):
        if isinstance(index, slice):
            if isinstance(sub, UserString):
                sub = sub.data
            elif not isinstance(sub, basestring):
                sub = str(sub)
            start, stop, step = index.indices(len(self.data))
            if step == -1:
                start, stop = stop+1, start+1
                sub = sub[::-1]
            elif step != 1:
                # XXX(twouters): I guess we should be reimplementing
                # the extended slice assignment/deletion algorithm here...
                raise TypeError, "invalid step in slicing assignment"
            start = min(start, stop)
            self.data = self.data[:start] + sub + self.data[stop:]
        else:
            if index < 0:
                index += len(self.data)
            if index < 0 or index >= len(self.data): raise IndexError
            self.data = self.data[:index] + sub + self.data[index+1:]
    def __delitem__(self, index):
        if isinstance(index, slice):
            start, stop, step = index.indices(len(self.data))
            if step == -1:
                start, stop = stop+1, start+1
            elif step != 1:
                # XXX(twouters): see same block in __setitem__
                raise TypeError, "invalid step in slicing deletion"
            start = min(start, stop)
            self.data = self.data[:start] + self.data[stop:]
        else:
            if index < 0:
                index += len(self.data)
            if index < 0 or index >= len(self.data): raise IndexError
            self.data = self.data[:index] + self.data[index+1:]
    def __setslice__(self, start, end, sub):
        start = max(start, 0); end = max(end, 0)
        if isinstance(sub, UserString):
            self.data = self.data[:start]+sub.data+self.data[end:]
        elif isinstance(sub, basestring):
            self.data = self.data[:start]+sub+self.data[end:]
        else:
            self.data = self.data[:start]+str(sub)+self.data[end:]
    def __delslice__(self, start, end):
        start = max(start, 0); end = max(end, 0)
        self.data = self.data[:start] + self.data[end:]
    def immutable(self):
        return UserString(self.data)
    def __iadd__(self, other):
        if isinstance(other, UserString):
            self.data += other.data
        elif isinstance(other, basestring):
            self.data += other
        else:
            self.data += str(other)
        return self
    def __imul__(self, n):
        self.data *= n
        return self
    def insert(self, index, value):
        self[index:index] = value

if __name__ == "__main__":
    # execute the regression test to stdout, if called as a script:
    import os
    called_in_dir, called_as = os.path.split(sys.argv[0])
    called_as, py = os.path.splitext(called_as)
    if '-q' in sys.argv:
        from test import test_support
        test_support.verbose = 0
    __import__('test.test_' + called_as.lower())

170
PythonHome/Lib/_LWPCookieJar.py
Normal file
@@ -0,0 +1,170 @@

"""Load / save to libwww-perl (LWP) format files.
|
||||
|
||||
Actually, the format is slightly extended from that used by LWP's
|
||||
(libwww-perl's) HTTP::Cookies, to avoid losing some RFC 2965 information
|
||||
not recorded by LWP.
|
||||
|
||||
It uses the version string "2.0", though really there isn't an LWP Cookies
|
||||
2.0 format. This indicates that there is extra information in here
|
||||
(domain_dot and # port_spec) while still being compatible with
|
||||
libwww-perl, I hope.
|
||||
|
||||
"""
|
||||
|
||||
import time, re
|
||||
from cookielib import (_warn_unhandled_exception, FileCookieJar, LoadError,
|
||||
Cookie, MISSING_FILENAME_TEXT,
|
||||
join_header_words, split_header_words,
|
||||
iso2time, time2isoz)
|
||||
|
||||
def lwp_cookie_str(cookie):
|
||||
"""Return string representation of Cookie in an the LWP cookie file format.
|
||||
|
||||
Actually, the format is extended a bit -- see module docstring.
|
||||
|
||||
"""
|
||||
h = [(cookie.name, cookie.value),
|
||||
("path", cookie.path),
|
||||
("domain", cookie.domain)]
|
||||
if cookie.port is not None: h.append(("port", cookie.port))
|
||||
if cookie.path_specified: h.append(("path_spec", None))
|
||||
if cookie.port_specified: h.append(("port_spec", None))
|
||||
if cookie.domain_initial_dot: h.append(("domain_dot", None))
|
||||
if cookie.secure: h.append(("secure", None))
|
||||
if cookie.expires: h.append(("expires",
|
||||
time2isoz(float(cookie.expires))))
|
||||
if cookie.discard: h.append(("discard", None))
|
||||
if cookie.comment: h.append(("comment", cookie.comment))
|
||||
if cookie.comment_url: h.append(("commenturl", cookie.comment_url))
|
||||
|
||||
keys = cookie._rest.keys()
|
||||
keys.sort()
|
||||
for k in keys:
|
||||
h.append((k, str(cookie._rest[k])))
|
||||
|
||||
h.append(("version", str(cookie.version)))
|
||||
|
||||
return join_header_words([h])
|
||||
|
||||
class LWPCookieJar(FileCookieJar):
|
||||
"""
|
||||
The LWPCookieJar saves a sequence of "Set-Cookie3" lines.
|
||||
"Set-Cookie3" is the format used by the libwww-perl libary, not known
|
||||
to be compatible with any browser, but which is easy to read and
|
||||
doesn't lose information about RFC 2965 cookies.
|
||||
|
||||
Additional methods
|
||||
|
||||
as_lwp_str(ignore_discard=True, ignore_expired=True)
|
||||
|
||||
"""
|
||||
|
||||
def as_lwp_str(self, ignore_discard=True, ignore_expires=True):
|
||||
"""Return cookies as a string of "\\n"-separated "Set-Cookie3" headers.
|
||||
|
||||
ignore_discard and ignore_expires: see docstring for FileCookieJar.save
|
||||
|
||||
"""
|
||||
now = time.time()
|
||||
r = []
|
||||
for cookie in self:
|
||||
if not ignore_discard and cookie.discard:
|
||||
continue
|
||||
if not ignore_expires and cookie.is_expired(now):
|
||||
continue
|
||||
r.append("Set-Cookie3: %s" % lwp_cookie_str(cookie))
|
||||
return "\n".join(r+[""])
|
||||
|
||||
def save(self, filename=None, ignore_discard=False, ignore_expires=False):
|
||||
if filename is None:
|
||||
if self.filename is not None: filename = self.filename
|
||||
else: raise ValueError(MISSING_FILENAME_TEXT)
|
||||
|
||||
f = open(filename, "w")
|
||||
try:
|
||||
# There really isn't an LWP Cookies 2.0 format, but this indicates
|
||||
# that there is extra information in here (domain_dot and
|
||||
# port_spec) while still being compatible with libwww-perl, I hope.
|
||||
f.write("#LWP-Cookies-2.0\n")
|
||||
f.write(self.as_lwp_str(ignore_discard, ignore_expires))
|
||||
finally:
|
||||
f.close()
|
||||
|
||||
def _really_load(self, f, filename, ignore_discard, ignore_expires):
|
||||
magic = f.readline()
|
||||
if not re.search(self.magic_re, magic):
|
||||
msg = ("%r does not look like a Set-Cookie3 (LWP) format "
|
||||
"file" % filename)
|
||||
raise LoadError(msg)
|
||||
|
||||
now = time.time()
|
||||
|
||||
header = "Set-Cookie3:"
|
||||
boolean_attrs = ("port_spec", "path_spec", "domain_dot",
|
||||
"secure", "discard")
|
||||
value_attrs = ("version",
|
||||
"port", "path", "domain",
|
||||
"expires",
|
||||
"comment", "commenturl")
|
||||
|
||||
try:
|
||||
while 1:
|
||||
line = f.readline()
|
||||
if line == "": break
|
||||
if not line.startswith(header):
|
||||
continue
|
||||
line = line[len(header):].strip()
|
||||
|
||||
for data in split_header_words([line]):
|
||||
name, value = data[0]
|
||||
standard = {}
|
||||
rest = {}
|
||||
for k in boolean_attrs:
|
||||
standard[k] = False
|
||||
for k, v in data[1:]:
|
||||
if k is not None:
|
||||
lc = k.lower()
|
||||
else:
|
||||
lc = None
|
||||
# don't lose case distinction for unknown fields
|
||||
if (lc in value_attrs) or (lc in boolean_attrs):
|
||||
k = lc
|
||||
if k in boolean_attrs:
|
||||
if v is None: v = True
|
||||
standard[k] = v
|
||||
elif k in value_attrs:
|
||||
standard[k] = v
|
||||
else:
|
||||
rest[k] = v
|
||||
|
||||
h = standard.get
|
||||
expires = h("expires")
|
||||
discard = h("discard")
|
||||
if expires is not None:
|
||||
expires = iso2time(expires)
|
||||
if expires is None:
|
||||
discard = True
|
||||
domain = h("domain")
|
||||
domain_specified = domain.startswith(".")
|
||||
c = Cookie(h("version"), name, value,
|
||||
h("port"), h("port_spec"),
|
||||
domain, domain_specified, h("domain_dot"),
|
||||
h("path"), h("path_spec"),
|
||||
h("secure"),
|
||||
expires,
|
||||
discard,
|
||||
h("comment"),
|
||||
h("commenturl"),
|
||||
rest)
|
||||
if not ignore_discard and c.discard:
|
||||
continue
|
||||
if not ignore_expires and c.is_expired(now):
|
||||
continue
|
||||
self.set_cookie(c)
|
||||
|
||||
except IOError:
|
||||
raise
|
||||
except Exception:
|
||||
_warn_unhandled_exception()
|
||||
raise LoadError("invalid Set-Cookie3 format file %r: %r" %
|
||||
(filename, line))
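

if __name__ == "__main__":
    # Usage sketch (illustrative; the file name is hypothetical): round-trip
    # a jar through the Set-Cookie3 format implemented above.
    jar = LWPCookieJar("cookies.lwp")
    # ... populate the jar, e.g. via urllib2 with an HTTPCookieProcessor ...
    jar.save(ignore_discard=True)
    jar.load(ignore_discard=True)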

149
PythonHome/Lib/_MozillaCookieJar.py
Normal file
@@ -0,0 +1,149 @@

"""Mozilla / Netscape cookie loading / saving."""
|
||||
|
||||
import re, time
|
||||
|
||||
from cookielib import (_warn_unhandled_exception, FileCookieJar, LoadError,
|
||||
Cookie, MISSING_FILENAME_TEXT)
|
||||
|
||||
class MozillaCookieJar(FileCookieJar):
|
||||
"""
|
||||
|
||||
WARNING: you may want to backup your browser's cookies file if you use
|
||||
this class to save cookies. I *think* it works, but there have been
|
||||
bugs in the past!
|
||||
|
||||
This class differs from CookieJar only in the format it uses to save and
|
||||
load cookies to and from a file. This class uses the Mozilla/Netscape
|
||||
`cookies.txt' format. lynx uses this file format, too.
|
||||
|
||||
Don't expect cookies saved while the browser is running to be noticed by
|
||||
the browser (in fact, Mozilla on unix will overwrite your saved cookies if
|
||||
you change them on disk while it's running; on Windows, you probably can't
|
||||
save at all while the browser is running).
|
||||
|
||||
Note that the Mozilla/Netscape format will downgrade RFC2965 cookies to
|
||||
Netscape cookies on saving.
|
||||
|
||||
In particular, the cookie version and port number information is lost,
|
||||
together with information about whether or not Path, Port and Discard were
|
||||
specified by the Set-Cookie2 (or Set-Cookie) header, and whether or not the
|
||||
domain as set in the HTTP header started with a dot (yes, I'm aware some
|
||||
domains in Netscape files start with a dot and some don't -- trust me, you
|
||||
really don't want to know any more about this).
|
||||
|
||||
Note that though Mozilla and Netscape use the same format, they use
|
||||
slightly different headers. The class saves cookies using the Netscape
|
||||
header by default (Mozilla can cope with that).
|
||||
|
||||
"""
|
||||
magic_re = "#( Netscape)? HTTP Cookie File"
|
||||
header = """\
|
||||
# Netscape HTTP Cookie File
|
||||
# http://curl.haxx.se/rfc/cookie_spec.html
|
||||
# This is a generated file! Do not edit.
|
||||
|
||||
"""
|
||||
|
||||
def _really_load(self, f, filename, ignore_discard, ignore_expires):
|
||||
now = time.time()
|
||||
|
||||
magic = f.readline()
|
||||
if not re.search(self.magic_re, magic):
|
||||
f.close()
|
||||
raise LoadError(
|
||||
"%r does not look like a Netscape format cookies file" %
|
||||
filename)
|
||||
|
||||
try:
|
||||
while 1:
|
||||
line = f.readline()
|
||||
if line == "": break
|
||||
|
||||
# last field may be absent, so keep any trailing tab
|
||||
if line.endswith("\n"): line = line[:-1]
|
||||
|
||||
# skip comments and blank lines XXX what is $ for?
|
||||
if (line.strip().startswith(("#", "$")) or
|
||||
line.strip() == ""):
|
||||
continue
|
||||
|
||||
domain, domain_specified, path, secure, expires, name, value = \
|
||||
line.split("\t")
|
||||
secure = (secure == "TRUE")
|
||||
domain_specified = (domain_specified == "TRUE")
|
||||
if name == "":
|
||||
# cookies.txt regards 'Set-Cookie: foo' as a cookie
|
||||
# with no name, whereas cookielib regards it as a
|
||||
# cookie with no value.
|
||||
name = value
|
||||
value = None
|
||||
|
||||
initial_dot = domain.startswith(".")
|
||||
assert domain_specified == initial_dot
|
||||
|
||||
discard = False
|
||||
if expires == "":
|
||||
expires = None
|
||||
discard = True
|
||||
|
||||
# assume path_specified is false
|
||||
c = Cookie(0, name, value,
|
||||
None, False,
|
||||
domain, domain_specified, initial_dot,
|
||||
path, False,
|
||||
secure,
|
||||
expires,
|
||||
discard,
|
||||
None,
|
||||
None,
|
||||
{})
|
||||
if not ignore_discard and c.discard:
|
||||
continue
|
||||
if not ignore_expires and c.is_expired(now):
|
||||
continue
|
||||
self.set_cookie(c)
|
||||
|
||||
except IOError:
|
||||
raise
|
||||
except Exception:
|
||||
_warn_unhandled_exception()
|
||||
raise LoadError("invalid Netscape format cookies file %r: %r" %
|
||||
(filename, line))
|
||||
|
||||
def save(self, filename=None, ignore_discard=False, ignore_expires=False):
|
||||
if filename is None:
|
||||
if self.filename is not None: filename = self.filename
|
||||
else: raise ValueError(MISSING_FILENAME_TEXT)
|
||||
|
||||
f = open(filename, "w")
|
||||
try:
|
||||
f.write(self.header)
|
||||
now = time.time()
|
||||
for cookie in self:
|
||||
if not ignore_discard and cookie.discard:
|
||||
continue
|
||||
if not ignore_expires and cookie.is_expired(now):
|
||||
continue
|
||||
if cookie.secure: secure = "TRUE"
|
||||
else: secure = "FALSE"
|
||||
if cookie.domain.startswith("."): initial_dot = "TRUE"
|
||||
else: initial_dot = "FALSE"
|
||||
if cookie.expires is not None:
|
||||
expires = str(cookie.expires)
|
||||
else:
|
||||
expires = ""
|
||||
if cookie.value is None:
|
||||
# cookies.txt regards 'Set-Cookie: foo' as a cookie
|
||||
# with no name, whereas cookielib regards it as a
|
||||
# cookie with no value.
|
||||
name = ""
|
||||
value = cookie.name
|
||||
else:
|
||||
name = cookie.name
|
||||
value = cookie.value
|
||||
f.write(
|
||||
"\t".join([cookie.domain, initial_dot, cookie.path,
|
||||
secure, expires, name, value])+
|
||||
"\n")
|
||||
finally:
|
||||
f.close()
|
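A short usage sketch for the class above (not part of this commit; the file name is hypothetical):

import cookielib

jar = cookielib.MozillaCookieJar("cookies.txt")
jar.save(ignore_discard=True)   # writes the "# Netscape HTTP Cookie File" header
jar.load(ignore_discard=True)   # _really_load() splits each record on tabs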
Binary file not shown.

128 PythonHome/Lib/__future__.py Normal file
@ -0,0 +1,128 @@
"""Record of phased-in incompatible language changes.

Each line is of the form:

    FeatureName = "_Feature(" OptionalRelease "," MandatoryRelease ","
                              CompilerFlag ")"

where, normally, OptionalRelease < MandatoryRelease, and both are 5-tuples
of the same form as sys.version_info:

    (PY_MAJOR_VERSION, # the 2 in 2.1.0a3; an int
     PY_MINOR_VERSION, # the 1; an int
     PY_MICRO_VERSION, # the 0; an int
     PY_RELEASE_LEVEL, # "alpha", "beta", "candidate" or "final"; string
     PY_RELEASE_SERIAL # the 3; an int
    )

OptionalRelease records the first release in which

    from __future__ import FeatureName

was accepted.

In the case of MandatoryReleases that have not yet occurred,
MandatoryRelease predicts the release in which the feature will become part
of the language.

Else MandatoryRelease records when the feature became part of the language;
in releases at or after that, modules no longer need

    from __future__ import FeatureName

to use the feature in question, but may continue to use such imports.

MandatoryRelease may also be None, meaning that a planned feature got
dropped.

Instances of class _Feature have two corresponding methods,
.getOptionalRelease() and .getMandatoryRelease().

CompilerFlag is the (bitfield) flag that should be passed in the fourth
argument to the builtin function compile() to enable the feature in
dynamically compiled code.  This flag is stored in the .compiler_flag
attribute on _Future instances.  These values must match the appropriate
#defines of CO_xxx flags in Include/compile.h.

No feature line is ever to be deleted from this file.
"""

all_feature_names = [
    "nested_scopes",
    "generators",
    "division",
    "absolute_import",
    "with_statement",
    "print_function",
    "unicode_literals",
]

__all__ = ["all_feature_names"] + all_feature_names

# The CO_xxx symbols are defined here under the same names used by
# compile.h, so that an editor search will find them here.  However,
# they're not exported in __all__, because they don't really belong to
# this module.
CO_NESTED = 0x0010                      # nested_scopes
CO_GENERATOR_ALLOWED = 0                # generators (obsolete, was 0x1000)
CO_FUTURE_DIVISION = 0x2000             # division
CO_FUTURE_ABSOLUTE_IMPORT = 0x4000      # perform absolute imports by default
CO_FUTURE_WITH_STATEMENT = 0x8000       # with statement
CO_FUTURE_PRINT_FUNCTION = 0x10000      # print function
CO_FUTURE_UNICODE_LITERALS = 0x20000    # unicode string literals

class _Feature:
    def __init__(self, optionalRelease, mandatoryRelease, compiler_flag):
        self.optional = optionalRelease
        self.mandatory = mandatoryRelease
        self.compiler_flag = compiler_flag

    def getOptionalRelease(self):
        """Return first release in which this feature was recognized.

        This is a 5-tuple, of the same form as sys.version_info.
        """

        return self.optional

    def getMandatoryRelease(self):
        """Return release in which this feature will become mandatory.

        This is a 5-tuple, of the same form as sys.version_info, or, if
        the feature was dropped, is None.
        """

        return self.mandatory

    def __repr__(self):
        return "_Feature" + repr((self.optional,
                                  self.mandatory,
                                  self.compiler_flag))

nested_scopes = _Feature((2, 1, 0, "beta", 1),
                         (2, 2, 0, "alpha", 0),
                         CO_NESTED)

generators = _Feature((2, 2, 0, "alpha", 1),
                      (2, 3, 0, "final", 0),
                      CO_GENERATOR_ALLOWED)

division = _Feature((2, 2, 0, "alpha", 2),
                    (3, 0, 0, "alpha", 0),
                    CO_FUTURE_DIVISION)

absolute_import = _Feature((2, 5, 0, "alpha", 1),
                           (3, 0, 0, "alpha", 0),
                           CO_FUTURE_ABSOLUTE_IMPORT)

with_statement = _Feature((2, 5, 0, "alpha", 1),
                          (2, 6, 0, "alpha", 0),
                          CO_FUTURE_WITH_STATEMENT)

print_function = _Feature((2, 6, 0, "alpha", 2),
                          (3, 0, 0, "alpha", 0),
                          CO_FUTURE_PRINT_FUNCTION)

unicode_literals = _Feature((2, 6, 0, "alpha", 2),
                            (3, 0, 0, "alpha", 0),
                            CO_FUTURE_UNICODE_LITERALS)
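A small sketch of how the compiler_flag values above are consumed (not part of this commit): compile() takes the flag as its fourth argument to enable the feature in dynamically compiled code.

import __future__

# Without the flag, "1 / 2" is Python 2 integer division; with it, true division.
code = compile("x = 1 / 2", "<example>", "exec",
               __future__.division.compiler_flag)
ns = {}
exec code in ns
assert ns["x"] == 0.5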
Binary file not shown.

1 PythonHome/Lib/__phello__.foo.py Normal file
@ -0,0 +1 @@
# This file exists as a helper for the test.test_frozen module.
Binary file not shown.

690 PythonHome/Lib/_abcoll.py Normal file
@ -0,0 +1,690 @@
# Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.

"""Abstract Base Classes (ABCs) for collections, according to PEP 3119.

DON'T USE THIS MODULE DIRECTLY! The classes here should be imported
via collections; they are defined here only to alleviate certain
bootstrapping issues.  Unit tests are in test_collections.
"""

from abc import ABCMeta, abstractmethod
import sys

__all__ = ["Hashable", "Iterable", "Iterator",
           "Sized", "Container", "Callable",
           "Set", "MutableSet",
           "Mapping", "MutableMapping",
           "MappingView", "KeysView", "ItemsView", "ValuesView",
           "Sequence", "MutableSequence",
           ]

### ONE-TRICK PONIES ###

def _hasattr(C, attr):
    try:
        return any(attr in B.__dict__ for B in C.__mro__)
    except AttributeError:
        # Old-style class
        return hasattr(C, attr)


class Hashable:
    __metaclass__ = ABCMeta

    @abstractmethod
    def __hash__(self):
        return 0

    @classmethod
    def __subclasshook__(cls, C):
        if cls is Hashable:
            try:
                for B in C.__mro__:
                    if "__hash__" in B.__dict__:
                        if B.__dict__["__hash__"]:
                            return True
                        break
            except AttributeError:
                # Old-style class
                if getattr(C, "__hash__", None):
                    return True
        return NotImplemented


class Iterable:
    __metaclass__ = ABCMeta

    @abstractmethod
    def __iter__(self):
        while False:
            yield None

    @classmethod
    def __subclasshook__(cls, C):
        if cls is Iterable:
            if _hasattr(C, "__iter__"):
                return True
        return NotImplemented

Iterable.register(str)


class Iterator(Iterable):

    @abstractmethod
    def next(self):
        'Return the next item from the iterator. When exhausted, raise StopIteration'
        raise StopIteration

    def __iter__(self):
        return self

    @classmethod
    def __subclasshook__(cls, C):
        if cls is Iterator:
            if _hasattr(C, "next") and _hasattr(C, "__iter__"):
                return True
        return NotImplemented


class Sized:
    __metaclass__ = ABCMeta

    @abstractmethod
    def __len__(self):
        return 0

    @classmethod
    def __subclasshook__(cls, C):
        if cls is Sized:
            if _hasattr(C, "__len__"):
                return True
        return NotImplemented


class Container:
    __metaclass__ = ABCMeta

    @abstractmethod
    def __contains__(self, x):
        return False

    @classmethod
    def __subclasshook__(cls, C):
        if cls is Container:
            if _hasattr(C, "__contains__"):
                return True
        return NotImplemented


class Callable:
    __metaclass__ = ABCMeta

    @abstractmethod
    def __call__(self, *args, **kwds):
        return False

    @classmethod
    def __subclasshook__(cls, C):
        if cls is Callable:
            if _hasattr(C, "__call__"):
                return True
        return NotImplemented


### SETS ###


class Set(Sized, Iterable, Container):
    """A set is a finite, iterable container.

    This class provides concrete generic implementations of all
    methods except for __contains__, __iter__ and __len__.

    To override the comparisons (presumably for speed, as the
    semantics are fixed), all you have to do is redefine __le__ and
    then the other operations will automatically follow suit.
    """

    def __le__(self, other):
        if not isinstance(other, Set):
            return NotImplemented
        if len(self) > len(other):
            return False
        for elem in self:
            if elem not in other:
                return False
        return True

    def __lt__(self, other):
        if not isinstance(other, Set):
            return NotImplemented
        return len(self) < len(other) and self.__le__(other)

    def __gt__(self, other):
        if not isinstance(other, Set):
            return NotImplemented
        return len(self) > len(other) and self.__ge__(other)

    def __ge__(self, other):
        if not isinstance(other, Set):
            return NotImplemented
        if len(self) < len(other):
            return False
        for elem in other:
            if elem not in self:
                return False
        return True

    def __eq__(self, other):
        if not isinstance(other, Set):
            return NotImplemented
        return len(self) == len(other) and self.__le__(other)

    def __ne__(self, other):
        return not (self == other)

    @classmethod
    def _from_iterable(cls, it):
        '''Construct an instance of the class from any iterable input.

        Must override this method if the class constructor signature
        does not accept an iterable for an input.
        '''
        return cls(it)

    def __and__(self, other):
        if not isinstance(other, Iterable):
            return NotImplemented
        return self._from_iterable(value for value in other if value in self)

    __rand__ = __and__

    def isdisjoint(self, other):
        'Return True if two sets have a null intersection.'
        for value in other:
            if value in self:
                return False
        return True

    def __or__(self, other):
        if not isinstance(other, Iterable):
            return NotImplemented
        chain = (e for s in (self, other) for e in s)
        return self._from_iterable(chain)

    __ror__ = __or__

    def __sub__(self, other):
        if not isinstance(other, Set):
            if not isinstance(other, Iterable):
                return NotImplemented
            other = self._from_iterable(other)
        return self._from_iterable(value for value in self
                                   if value not in other)

    def __rsub__(self, other):
        if not isinstance(other, Set):
            if not isinstance(other, Iterable):
                return NotImplemented
            other = self._from_iterable(other)
        return self._from_iterable(value for value in other
                                   if value not in self)

    def __xor__(self, other):
        if not isinstance(other, Set):
            if not isinstance(other, Iterable):
                return NotImplemented
            other = self._from_iterable(other)
        return (self - other) | (other - self)

    __rxor__ = __xor__

    # Sets are not hashable by default, but subclasses can change this
    __hash__ = None

    def _hash(self):
        """Compute the hash value of a set.

        Note that we don't define __hash__: not all sets are hashable.
        But if you define a hashable set type, its __hash__ should
        call this function.

        This must be compatible with __eq__.

        All sets ought to compare equal if they contain the same
        elements, regardless of how they are implemented, and
        regardless of the order of the elements; so there's not much
        freedom for __eq__ or __hash__.  We match the algorithm used
        by the built-in frozenset type.
        """
        MAX = sys.maxint
        MASK = 2 * MAX + 1
        n = len(self)
        h = 1927868237 * (n + 1)
        h &= MASK
        for x in self:
            hx = hash(x)
            h ^= (hx ^ (hx << 16) ^ 89869747) * 3644798167
            h &= MASK
        h = h * 69069 + 907133923
        h &= MASK
        if h > MAX:
            h -= MASK + 1
        if h == -1:
            h = 590923713
        return h

Set.register(frozenset)
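A minimal Set subclass sketch (not part of this commit; it mirrors the ListBasedSet example from the Python docs): only __contains__, __iter__ and __len__ are supplied, and the mixin methods above provide the comparisons and the &, |, -, ^ operators.

class ListBasedSet(Set):
    'Alternate set implementation favoring space over speed.'
    def __init__(self, iterable):
        self.elements = lst = []
        for value in iterable:
            if value not in lst:
                lst.append(value)
    def __iter__(self):
        return iter(self.elements)
    def __contains__(self, value):
        return value in self.elements
    def __len__(self):
        return len(self.elements)

s1 = ListBasedSet('abcdef')
s2 = ListBasedSet('defghi')
overlap = s1 & s2    # __and__ above builds the result via _from_iterable()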
class MutableSet(Set):
    """A mutable set is a finite, iterable container.

    This class provides concrete generic implementations of all
    methods except for __contains__, __iter__, __len__,
    add(), and discard().

    To override the comparisons (presumably for speed, as the
    semantics are fixed), all you have to do is redefine __le__ and
    then the other operations will automatically follow suit.
    """

    @abstractmethod
    def add(self, value):
        """Add an element."""
        raise NotImplementedError

    @abstractmethod
    def discard(self, value):
        """Remove an element.  Do not raise an exception if absent."""
        raise NotImplementedError

    def remove(self, value):
        """Remove an element. If not a member, raise a KeyError."""
        if value not in self:
            raise KeyError(value)
        self.discard(value)

    def pop(self):
        """Return the popped value.  Raise KeyError if empty."""
        it = iter(self)
        try:
            value = next(it)
        except StopIteration:
            raise KeyError
        self.discard(value)
        return value

    def clear(self):
        """This is slow (creates N new iterators!) but effective."""
        try:
            while True:
                self.pop()
        except KeyError:
            pass

    def __ior__(self, it):
        for value in it:
            self.add(value)
        return self

    def __iand__(self, it):
        for value in (self - it):
            self.discard(value)
        return self

    def __ixor__(self, it):
        if it is self:
            self.clear()
        else:
            if not isinstance(it, Set):
                it = self._from_iterable(it)
            for value in it:
                if value in self:
                    self.discard(value)
                else:
                    self.add(value)
        return self

    def __isub__(self, it):
        if it is self:
            self.clear()
        else:
            for value in it:
                self.discard(value)
        return self

MutableSet.register(set)
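Building on the hypothetical ListBasedSet sketch above (again not part of this commit): adding add() and discard() is all MutableSet needs to unlock the in-place |=, &=, ^= and -= mixins.

class ListBasedMutableSet(ListBasedSet, MutableSet):
    def add(self, value):
        if value not in self.elements:
            self.elements.append(value)
    def discard(self, value):
        try:
            self.elements.remove(value)
        except ValueError:
            pass

m = ListBasedMutableSet('ab')
m |= ListBasedMutableSet('bc')   # __ior__ above calls add() per element
assert sorted(m) == ['a', 'b', 'c']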
### MAPPINGS ###


class Mapping(Sized, Iterable, Container):

    """A Mapping is a generic container for associating key/value
    pairs.

    This class provides concrete generic implementations of all
    methods except for __getitem__, __iter__, and __len__.

    """

    @abstractmethod
    def __getitem__(self, key):
        raise KeyError

    def get(self, key, default=None):
        'D.get(k[,d]) -> D[k] if k in D, else d.  d defaults to None.'
        try:
            return self[key]
        except KeyError:
            return default

    def __contains__(self, key):
        try:
            self[key]
        except KeyError:
            return False
        else:
            return True

    def iterkeys(self):
        'D.iterkeys() -> an iterator over the keys of D'
        return iter(self)

    def itervalues(self):
        'D.itervalues() -> an iterator over the values of D'
        for key in self:
            yield self[key]

    def iteritems(self):
        'D.iteritems() -> an iterator over the (key, value) items of D'
        for key in self:
            yield (key, self[key])

    def keys(self):
        "D.keys() -> list of D's keys"
        return list(self)

    def items(self):
        "D.items() -> list of D's (key, value) pairs, as 2-tuples"
        return [(key, self[key]) for key in self]

    def values(self):
        "D.values() -> list of D's values"
        return [self[key] for key in self]

    # Mappings are not hashable by default, but subclasses can change this
    __hash__ = None

    def __eq__(self, other):
        if not isinstance(other, Mapping):
            return NotImplemented
        return dict(self.items()) == dict(other.items())

    def __ne__(self, other):
        return not (self == other)

class MappingView(Sized):

    def __init__(self, mapping):
        self._mapping = mapping

    def __len__(self):
        return len(self._mapping)

    def __repr__(self):
        return '{0.__class__.__name__}({0._mapping!r})'.format(self)


class KeysView(MappingView, Set):

    @classmethod
    def _from_iterable(self, it):
        return set(it)

    def __contains__(self, key):
        return key in self._mapping

    def __iter__(self):
        for key in self._mapping:
            yield key


class ItemsView(MappingView, Set):

    @classmethod
    def _from_iterable(self, it):
        return set(it)

    def __contains__(self, item):
        key, value = item
        try:
            v = self._mapping[key]
        except KeyError:
            return False
        else:
            return v == value

    def __iter__(self):
        for key in self._mapping:
            yield (key, self._mapping[key])


class ValuesView(MappingView):

    def __contains__(self, value):
        for key in self._mapping:
            if value == self._mapping[key]:
                return True
        return False

    def __iter__(self):
        for key in self._mapping:
            yield self._mapping[key]


class MutableMapping(Mapping):

    """A MutableMapping is a generic container for associating
    key/value pairs.

    This class provides concrete generic implementations of all
    methods except for __getitem__, __setitem__, __delitem__,
    __iter__, and __len__.

    """

    @abstractmethod
    def __setitem__(self, key, value):
        raise KeyError

    @abstractmethod
    def __delitem__(self, key):
        raise KeyError

    __marker = object()

    def pop(self, key, default=__marker):
        '''D.pop(k[,d]) -> v, remove specified key and return the corresponding value.
        If key is not found, d is returned if given, otherwise KeyError is raised.
        '''
        try:
            value = self[key]
        except KeyError:
            if default is self.__marker:
                raise
            return default
        else:
            del self[key]
            return value

    def popitem(self):
        '''D.popitem() -> (k, v), remove and return some (key, value) pair
        as a 2-tuple; but raise KeyError if D is empty.
        '''
        try:
            key = next(iter(self))
        except StopIteration:
            raise KeyError
        value = self[key]
        del self[key]
        return key, value

    def clear(self):
        'D.clear() -> None.  Remove all items from D.'
        try:
            while True:
                self.popitem()
        except KeyError:
            pass

    def update(*args, **kwds):
        ''' D.update([E, ]**F) -> None.  Update D from mapping/iterable E and F.
        If E present and has a .keys() method, does:     for k in E: D[k] = E[k]
        If E present and lacks .keys() method, does:     for (k, v) in E: D[k] = v
        In either case, this is followed by: for k, v in F.items(): D[k] = v
        '''
        if len(args) > 2:
            raise TypeError("update() takes at most 2 positional "
                            "arguments ({} given)".format(len(args)))
        elif not args:
            raise TypeError("update() takes at least 1 argument (0 given)")
        self = args[0]
        other = args[1] if len(args) >= 2 else ()

        if isinstance(other, Mapping):
            for key in other:
                self[key] = other[key]
        elif hasattr(other, "keys"):
            for key in other.keys():
                self[key] = other[key]
        else:
            for key, value in other:
                self[key] = value
        for key, value in kwds.items():
            self[key] = value

    def setdefault(self, key, default=None):
        'D.setdefault(k[,d]) -> D.get(k,d), also set D[k]=d if k not in D'
        try:
            return self[key]
        except KeyError:
            self[key] = default
        return default

MutableMapping.register(dict)
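A dict-backed MutableMapping sketch (hypothetical, not part of this commit): the five abstract methods are implemented, and the mixins above supply get(), pop(), setdefault(), update() and friends for free.

class LowerCaseDict(MutableMapping):
    'A mapping that normalizes keys to lower case.'
    def __init__(self):
        self._data = {}
    def __getitem__(self, key):
        return self._data[key.lower()]
    def __setitem__(self, key, value):
        self._data[key.lower()] = value
    def __delitem__(self, key):
        del self._data[key.lower()]
    def __iter__(self):
        return iter(self._data)
    def __len__(self):
        return len(self._data)

d = LowerCaseDict()
d.update({"Content-Type": "text/html"})       # mixin update() from above
assert d.get("CONTENT-TYPE") == "text/html"   # mixin get() from above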
### SEQUENCES ###


class Sequence(Sized, Iterable, Container):
    """All the operations on a read-only sequence.

    Concrete subclasses must override __new__ or __init__,
    __getitem__, and __len__.
    """

    @abstractmethod
    def __getitem__(self, index):
        raise IndexError

    def __iter__(self):
        i = 0
        try:
            while True:
                v = self[i]
                yield v
                i += 1
        except IndexError:
            return

    def __contains__(self, value):
        for v in self:
            if v == value:
                return True
        return False

    def __reversed__(self):
        for i in reversed(range(len(self))):
            yield self[i]

    def index(self, value):
        '''S.index(value) -> integer -- return first index of value.
        Raises ValueError if the value is not present.
        '''
        for i, v in enumerate(self):
            if v == value:
                return i
        raise ValueError

    def count(self, value):
        'S.count(value) -> integer -- return number of occurrences of value'
        return sum(1 for v in self if v == value)

Sequence.register(tuple)
Sequence.register(basestring)
Sequence.register(buffer)
Sequence.register(xrange)


class MutableSequence(Sequence):

    """All the operations on a read-write sequence.

    Concrete subclasses must provide __new__ or __init__,
    __getitem__, __setitem__, __delitem__, __len__, and insert().

    """

    @abstractmethod
    def __setitem__(self, index, value):
        raise IndexError

    @abstractmethod
    def __delitem__(self, index):
        raise IndexError

    @abstractmethod
    def insert(self, index, value):
        'S.insert(index, object) -- insert object before index'
        raise IndexError

    def append(self, value):
        'S.append(object) -- append object to the end of the sequence'
        self.insert(len(self), value)

    def reverse(self):
        'S.reverse() -- reverse *IN PLACE*'
        n = len(self)
        for i in range(n//2):
            self[i], self[n-i-1] = self[n-i-1], self[i]

    def extend(self, values):
        'S.extend(iterable) -- extend sequence by appending elements from the iterable'
        for v in values:
            self.append(v)

    def pop(self, index=-1):
        '''S.pop([index]) -> item -- remove and return item at index (default last).
        Raise IndexError if list is empty or index is out of range.
        '''
        v = self[index]
        del self[index]
        return v

    def remove(self, value):
        '''S.remove(value) -- remove first occurrence of value.
        Raise ValueError if the value is not present.
        '''
        del self[self.index(value)]

    def __iadd__(self, values):
        self.extend(values)
        return self

MutableSequence.register(list)
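A read-only Sequence sketch (hypothetical, not part of this commit): __getitem__ and __len__ are enough for the mixins above to derive iteration, membership tests, index() and count().

class Chars(Sequence):
    def __init__(self, text):
        self._text = text
    def __getitem__(self, index):
        return self._text[index]
    def __len__(self):
        return len(self._text)

c = Chars("banana")
assert 'b' in c              # __contains__ mixin
assert c.count('a') == 3     # count() mixin
assert c.index('n') == 2     # index() mixin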
Binary file not shown.

502 PythonHome/Lib/_osx_support.py Normal file
@ -0,0 +1,502 @@
"""Shared OS X support functions."""

import os
import re
import sys

__all__ = [
    'compiler_fixup',
    'customize_config_vars',
    'customize_compiler',
    'get_platform_osx',
]

# configuration variables that may contain universal build flags,
# like "-arch" or "-isdkroot", that may need customization for
# the user environment
_UNIVERSAL_CONFIG_VARS = ('CFLAGS', 'LDFLAGS', 'CPPFLAGS', 'BASECFLAGS',
                          'BLDSHARED', 'LDSHARED', 'CC', 'CXX',
                          'PY_CFLAGS', 'PY_LDFLAGS', 'PY_CPPFLAGS',
                          'PY_CORE_CFLAGS')

# configuration variables that may contain compiler calls
_COMPILER_CONFIG_VARS = ('BLDSHARED', 'LDSHARED', 'CC', 'CXX')

# prefix added to original configuration variable names
_INITPRE = '_OSX_SUPPORT_INITIAL_'


def _find_executable(executable, path=None):
    """Tries to find 'executable' in the directories listed in 'path'.

    A string listing directories separated by 'os.pathsep'; defaults to
    os.environ['PATH'].  Returns the complete filename or None if not found.
    """
    if path is None:
        path = os.environ['PATH']

    paths = path.split(os.pathsep)
    base, ext = os.path.splitext(executable)

    if (sys.platform == 'win32' or os.name == 'os2') and (ext != '.exe'):
        executable = executable + '.exe'

    if not os.path.isfile(executable):
        for p in paths:
            f = os.path.join(p, executable)
            if os.path.isfile(f):
                # the file exists, we have a shot at spawn working
                return f
        return None
    else:
        return executable


def _read_output(commandstring):
    """Output from successful command execution or None"""
    # Similar to os.popen(commandstring, "r").read(),
    # but without actually using os.popen because that
    # function is not usable during python bootstrap.
    # tempfile is also not available then.
    import contextlib
    try:
        import tempfile
        fp = tempfile.NamedTemporaryFile()
    except ImportError:
        fp = open("/tmp/_osx_support.%s"%(
            os.getpid(),), "w+b")

    with contextlib.closing(fp) as fp:
        cmd = "%s 2>/dev/null >'%s'" % (commandstring, fp.name)
        return fp.read().strip() if not os.system(cmd) else None


def _find_build_tool(toolname):
    """Find a build tool on current path or using xcrun"""
    return (_find_executable(toolname)
                or _read_output("/usr/bin/xcrun -find %s" % (toolname,))
                or ''
            )

_SYSTEM_VERSION = None

def _get_system_version():
    """Return the OS X system version as a string"""
    # Reading this plist is a documented way to get the system
    # version (see the documentation for the Gestalt Manager)
    # We avoid using platform.mac_ver to avoid possible bootstrap issues during
    # the build of Python itself (distutils is used to build standard library
    # extensions).

    global _SYSTEM_VERSION

    if _SYSTEM_VERSION is None:
        _SYSTEM_VERSION = ''
        try:
            f = open('/System/Library/CoreServices/SystemVersion.plist')
        except IOError:
            # We're on a plain darwin box, fall back to the default
            # behaviour.
            pass
        else:
            try:
                m = re.search(r'<key>ProductUserVisibleVersion</key>\s*'
                              r'<string>(.*?)</string>', f.read())
            finally:
                f.close()
            if m is not None:
                _SYSTEM_VERSION = '.'.join(m.group(1).split('.')[:2])
            # else: fall back to the default behaviour

    return _SYSTEM_VERSION

def _remove_original_values(_config_vars):
    """Remove original unmodified values for testing"""
    # This is needed for higher-level cross-platform tests of get_platform.
    for k in list(_config_vars):
        if k.startswith(_INITPRE):
            del _config_vars[k]

def _save_modified_value(_config_vars, cv, newvalue):
    """Save modified and original unmodified value of configuration var"""

    oldvalue = _config_vars.get(cv, '')
    if (oldvalue != newvalue) and (_INITPRE + cv not in _config_vars):
        _config_vars[_INITPRE + cv] = oldvalue
    _config_vars[cv] = newvalue

def _supports_universal_builds():
    """Returns True if universal builds are supported on this system"""
    # As an approximation, we assume that if we are running on 10.4 or above,
    # then we are running with an Xcode environment that supports universal
    # builds, in particular -isysroot and -arch arguments to the compiler. This
    # is in support of allowing 10.4 universal builds to run on 10.3.x systems.

    osx_version = _get_system_version()
    if osx_version:
        try:
            osx_version = tuple(int(i) for i in osx_version.split('.'))
        except ValueError:
            osx_version = ''
    return bool(osx_version >= (10, 4)) if osx_version else False
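A small sketch of the bookkeeping helper above (hypothetical values, not part of this commit): _save_modified_value() stashes the first original value under an _INITPRE-prefixed key so that get_platform_osx() can still inspect the unmodified flags later.

cv = {'CFLAGS': '-arch ppc -O2'}
_save_modified_value(cv, 'CFLAGS', '-O2')
assert cv['CFLAGS'] == '-O2'
assert cv[_INITPRE + 'CFLAGS'] == '-arch ppc -O2'   # original preserved once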
def _find_appropriate_compiler(_config_vars):
    """Find appropriate C compiler for extension module builds"""

    # Issue #13590:
    #    The OSX location for the compiler varies between OSX
    #    (or rather Xcode) releases.  With older releases (up-to 10.5)
    #    the compiler is in /usr/bin, with newer releases the compiler
    #    can only be found inside Xcode.app if the "Command Line Tools"
    #    are not installed.
    #
    #    Furthermore, the compiler that can be used varies between
    #    Xcode releases. Up to Xcode 4 it was possible to use 'gcc-4.2'
    #    as the compiler, after that 'clang' should be used because
    #    gcc-4.2 is either not present, or a copy of 'llvm-gcc' that
    #    miscompiles Python.

    # skip checks if the compiler was overridden with a CC env variable
    if 'CC' in os.environ:
        return _config_vars

    # The CC config var might contain additional arguments.
    # Ignore them while searching.
    cc = oldcc = _config_vars['CC'].split()[0]
    if not _find_executable(cc):
        # Compiler is not found on the shell search PATH.
        # Now search for clang, first on PATH (if the Command Line
        # Tools have been installed in / or if the user has provided
        # another location via CC).  If not found, try using xcrun
        # to find an uninstalled clang (within a selected Xcode).

        # NOTE: Cannot use subprocess here because of bootstrap
        # issues when building Python itself (and os.popen is
        # implemented on top of subprocess and is therefore not
        # usable as well)

        cc = _find_build_tool('clang')

    elif os.path.basename(cc).startswith('gcc'):
        # Compiler is GCC, check if it is LLVM-GCC
        data = _read_output("'%s' --version"
                             % (cc.replace("'", "'\"'\"'"),))
        if data and 'llvm-gcc' in data:
            # Found LLVM-GCC, fall back to clang
            cc = _find_build_tool('clang')

    if not cc:
        raise SystemError(
               "Cannot locate working compiler")

    if cc != oldcc:
        # Found a replacement compiler.
        # Modify config vars using new compiler, if not already explicitly
        # overridden by an env variable, preserving additional arguments.
        for cv in _COMPILER_CONFIG_VARS:
            if cv in _config_vars and cv not in os.environ:
                cv_split = _config_vars[cv].split()
                cv_split[0] = cc if cv != 'CXX' else cc + '++'
                _save_modified_value(_config_vars, cv, ' '.join(cv_split))

    return _config_vars


def _remove_universal_flags(_config_vars):
    """Remove all universal build arguments from config vars"""

    for cv in _UNIVERSAL_CONFIG_VARS:
        # Do not alter a config var explicitly overridden by env var
        if cv in _config_vars and cv not in os.environ:
            flags = _config_vars[cv]
            flags = re.sub('-arch\s+\w+\s', ' ', flags)
            flags = re.sub('-isysroot [^ \t]*', ' ', flags)
            _save_modified_value(_config_vars, cv, flags)

    return _config_vars


def _remove_unsupported_archs(_config_vars):
    """Remove any unsupported archs from config vars"""
    # Different Xcode releases support different sets for '-arch'
    # flags. In particular, Xcode 4.x no longer supports the
    # PPC architectures.
    #
    # This code automatically removes '-arch ppc' and '-arch ppc64'
    # when these are not supported. That makes it possible to
    # build extensions on OSX 10.7 and later with the prebuilt
    # 32-bit installer on the python.org website.

    # skip checks if the compiler was overridden with a CC env variable
    if 'CC' in os.environ:
        return _config_vars

    if re.search('-arch\s+ppc', _config_vars['CFLAGS']) is not None:
        # NOTE: Cannot use subprocess here because of bootstrap
        # issues when building Python itself
        status = os.system(
            """echo 'int main{};' | """
            """'%s' -c -arch ppc -x c -o /dev/null /dev/null 2>/dev/null"""
            %(_config_vars['CC'].replace("'", "'\"'\"'"),))
        if status:
            # The compile failed for some reason.  Because of differences
            # across Xcode and compiler versions, there is no reliable way
            # to be sure why it failed.  Assume here it was due to lack of
            # PPC support and remove the related '-arch' flags from each
            # config variables not explicitly overridden by an environment
            # variable.  If the error was for some other reason, we hope the
            # failure will show up again when trying to compile an extension
            # module.
            for cv in _UNIVERSAL_CONFIG_VARS:
                if cv in _config_vars and cv not in os.environ:
                    flags = _config_vars[cv]
                    flags = re.sub('-arch\s+ppc\w*\s', ' ', flags)
                    _save_modified_value(_config_vars, cv, flags)

    return _config_vars


def _override_all_archs(_config_vars):
    """Allow override of all archs with ARCHFLAGS env var"""
    # NOTE: This name was introduced by Apple in OSX 10.5 and
    # is used by several scripting languages distributed with
    # that OS release.
    if 'ARCHFLAGS' in os.environ:
        arch = os.environ['ARCHFLAGS']
        for cv in _UNIVERSAL_CONFIG_VARS:
            if cv in _config_vars and '-arch' in _config_vars[cv]:
                flags = _config_vars[cv]
                flags = re.sub('-arch\s+\w+\s', ' ', flags)
                flags = flags + ' ' + arch
                _save_modified_value(_config_vars, cv, flags)

    return _config_vars


def _check_for_unavailable_sdk(_config_vars):
    """Remove references to any SDKs not available"""
    # If we're on OSX 10.5 or later and the user tries to
    # compile an extension using an SDK that is not present
    # on the current machine it is better to not use an SDK
    # than to fail.  This is particularly important with
    # the standalone Command Line Tools alternative to a
    # full-blown Xcode install since the CLT packages do not
    # provide SDKs.  If the SDK is not present, it is assumed
    # that the header files and dev libs have been installed
    # to /usr and /System/Library by either a standalone CLT
    # package or the CLT component within Xcode.
    cflags = _config_vars.get('CFLAGS', '')
    m = re.search(r'-isysroot\s+(\S+)', cflags)
    if m is not None:
        sdk = m.group(1)
        if not os.path.exists(sdk):
            for cv in _UNIVERSAL_CONFIG_VARS:
                # Do not alter a config var explicitly overridden by env var
                if cv in _config_vars and cv not in os.environ:
                    flags = _config_vars[cv]
                    flags = re.sub(r'-isysroot\s+\S+(?:\s|$)', ' ', flags)
                    _save_modified_value(_config_vars, cv, flags)

    return _config_vars


def compiler_fixup(compiler_so, cc_args):
    """
    This function will strip '-isysroot PATH' and '-arch ARCH' from the
    compile flags if the user has specified one of them in extra_compile_flags.

    This is needed because '-arch ARCH' adds another architecture to the
    build, without a way to remove an architecture. Furthermore GCC will
    barf if multiple '-isysroot' arguments are present.
    """
    stripArch = stripSysroot = False

    compiler_so = list(compiler_so)

    if not _supports_universal_builds():
        # OSX before 10.4.0, these don't support -arch and -isysroot at
        # all.
        stripArch = stripSysroot = True
    else:
        stripArch = '-arch' in cc_args
        stripSysroot = '-isysroot' in cc_args

    if stripArch or 'ARCHFLAGS' in os.environ:
        while True:
            try:
                index = compiler_so.index('-arch')
                # Strip this argument and the next one:
                del compiler_so[index:index+2]
            except ValueError:
                break

    if 'ARCHFLAGS' in os.environ and not stripArch:
        # User specified different -arch flags in the environ,
        # see also distutils.sysconfig
        compiler_so = compiler_so + os.environ['ARCHFLAGS'].split()

    if stripSysroot:
        while True:
            try:
                index = compiler_so.index('-isysroot')
                # Strip this argument and the next one:
                del compiler_so[index:index+2]
            except ValueError:
                break

    # Check if the SDK that is used during compilation actually exists,
    # the universal build requires the usage of a universal SDK and not all
    # users have that installed by default.
    sysroot = None
    if '-isysroot' in cc_args:
        idx = cc_args.index('-isysroot')
        sysroot = cc_args[idx+1]
    elif '-isysroot' in compiler_so:
        idx = compiler_so.index('-isysroot')
        sysroot = compiler_so[idx+1]

    if sysroot and not os.path.isdir(sysroot):
        from distutils import log
        log.warn("Compiling with an SDK that doesn't seem to exist: %s",
                 sysroot)
        log.warn("Please check your Xcode installation")

    return compiler_so
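A sketch of compiler_fixup() in action (hypothetical flag values, not part of this commit; assumes ARCHFLAGS is not set in the environment): because the user passed an explicit -arch in cc_args, the configured -arch pairs are stripped.

so = ['gcc', '-arch', 'ppc', '-arch', 'i386', '-O2']
fixed = compiler_fixup(so, ['-arch', 'x86_64'])
assert fixed == ['gcc', '-O2']   # both configured '-arch X' pairs removed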
def customize_config_vars(_config_vars):
    """Customize Python build configuration variables.

    Called internally from sysconfig with a mutable mapping
    containing name/value pairs parsed from the configured
    makefile used to build this interpreter.  Returns
    the mapping updated as needed to reflect the environment
    in which the interpreter is running; in the case of
    a Python from a binary installer, the installed
    environment may be very different from the build
    environment, i.e. different OS levels, different
    build tools, different available CPU architectures.

    This customization is performed whenever
    distutils.sysconfig.get_config_vars() is first
    called.  It may be used in environments where no
    compilers are present, i.e. when installing pure
    Python dists.  Customization of compiler paths
    and detection of unavailable archs is deferred
    until the first extension module build is
    requested (in distutils.sysconfig.customize_compiler).

    Currently called from distutils.sysconfig
    """

    if not _supports_universal_builds():
        # On Mac OS X before 10.4, check if -arch and -isysroot
        # are in CFLAGS or LDFLAGS and remove them if they are.
        # This is needed when building extensions on a 10.3 system
        # using a universal build of python.
        _remove_universal_flags(_config_vars)

    # Allow user to override all archs with ARCHFLAGS env var
    _override_all_archs(_config_vars)

    # Remove references to sdks that are not found
    _check_for_unavailable_sdk(_config_vars)

    return _config_vars


def customize_compiler(_config_vars):
    """Customize compiler path and configuration variables.

    This customization is performed when the first
    extension module build is requested
    (in distutils.sysconfig.customize_compiler).
    """

    # Find a compiler to use for extension module builds
    _find_appropriate_compiler(_config_vars)

    # Remove ppc arch flags if not supported here
    _remove_unsupported_archs(_config_vars)

    # Allow user to override all archs with ARCHFLAGS env var
    _override_all_archs(_config_vars)

    return _config_vars


def get_platform_osx(_config_vars, osname, release, machine):
    """Filter values for get_platform()"""
    # called from get_platform() in sysconfig and distutils.util
    #
    # For our purposes, we'll assume that the system version from
    # distutils' perspective is what MACOSX_DEPLOYMENT_TARGET is set
    # to.  This makes the compatibility story a bit more sane because the
    # machine is going to compile and link as if it were
    # MACOSX_DEPLOYMENT_TARGET.

    macver = _config_vars.get('MACOSX_DEPLOYMENT_TARGET', '')
    macrelease = _get_system_version() or macver
    macver = macver or macrelease

    if macver:
        release = macver
        osname = "macosx"

        # Use the original CFLAGS value, if available, so that we
        # return the same machine type for the platform string.
        # Otherwise, distutils may consider this a cross-compiling
        # case and disallow installs.
        cflags = _config_vars.get(_INITPRE+'CFLAGS',
                                    _config_vars.get('CFLAGS', ''))
        if macrelease:
            try:
                macrelease = tuple(int(i) for i in macrelease.split('.')[0:2])
            except ValueError:
                macrelease = (10, 0)
        else:
            # assume no universal support
            macrelease = (10, 0)

        if (macrelease >= (10, 4)) and '-arch' in cflags.strip():
            # The universal build will build fat binaries, but not on
            # systems before 10.4

            machine = 'fat'

            archs = re.findall('-arch\s+(\S+)', cflags)
            archs = tuple(sorted(set(archs)))

            if len(archs) == 1:
                machine = archs[0]
            elif archs == ('i386', 'ppc'):
                machine = 'fat'
            elif archs == ('i386', 'x86_64'):
                machine = 'intel'
            elif archs == ('i386', 'ppc', 'x86_64'):
                machine = 'fat3'
            elif archs == ('ppc64', 'x86_64'):
                machine = 'fat64'
            elif archs == ('i386', 'ppc', 'ppc64', 'x86_64'):
                machine = 'universal'
            else:
                raise ValueError(
                   "Don't know machine value for archs=%r" % (archs,))

        elif machine == 'i386':
            # On OSX the machine type returned by uname is always the
            # 32-bit variant, even if the executable architecture is
            # the 64-bit variant
            if sys.maxint >= 2**32:
                machine = 'x86_64'

        elif machine in ('PowerPC', 'Power_Macintosh'):
            # Pick a sane name for the PPC architecture.
            # See 'i386' case
            if sys.maxint >= 2**32:
                machine = 'ppc64'
            else:
                machine = 'ppc'

    return (osname, release, machine)
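A sketch of the arch-to-machine mapping above (made-up config values, not part of this commit; assumes a 10.4+ system so the universal branch is taken):

cfg = {'MACOSX_DEPLOYMENT_TARGET': '10.6',
       'CFLAGS': '-arch i386 -arch x86_64 -O2'}
print get_platform_osx(cfg, 'darwin', '11.4.2', 'i386')
# -> ('macosx', '10.6', 'intel'): the sorted ('i386', 'x86_64') pair maps to 'intel'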
Binary file not shown.

2030 PythonHome/Lib/_pyio.py Normal file
File diff suppressed because it is too large

Binary file not shown.
467 PythonHome/Lib/_strptime.py Normal file
@ -0,0 +1,467 @@
"""Strptime-related classes and functions.

CLASSES:
    LocaleTime -- Discovers and stores locale-specific time information
    TimeRE -- Creates regexes for pattern matching a string of text containing
                time information

FUNCTIONS:
    _getlang -- Figure out what language is being used for the locale
    strptime -- Calculates the time struct represented by the passed-in string

"""
import time
import locale
import calendar
from re import compile as re_compile
from re import IGNORECASE
from re import escape as re_escape
from datetime import date as datetime_date
try:
    from thread import allocate_lock as _thread_allocate_lock
except:
    from dummy_thread import allocate_lock as _thread_allocate_lock

__all__ = []

def _getlang():
    # Figure out what the current language is set to.
    return locale.getlocale(locale.LC_TIME)

class LocaleTime(object):
    """Stores and handles locale-specific information related to time.

    ATTRIBUTES:
        f_weekday -- full weekday names (7-item list)
        a_weekday -- abbreviated weekday names (7-item list)
        f_month -- full month names (13-item list; dummy value in [0], which
                    is added by code)
        a_month -- abbreviated month names (13-item list, dummy value in
                    [0], which is added by code)
        am_pm -- AM/PM representation (2-item list)
        LC_date_time -- format string for date/time representation (string)
        LC_date -- format string for date representation (string)
        LC_time -- format string for time representation (string)
        timezone -- daylight- and non-daylight-savings timezone representation
                    (2-item list of sets)
        lang -- Language used by instance (2-item tuple)
    """

    def __init__(self):
        """Set all attributes.

        Order of methods called matters for dependency reasons.

        The locale language is set at the offset and then checked again before
        exiting.  This is to make sure that the attributes were not set with a
        mix of information from more than one locale.  This would most likely
        happen when using threads where one thread calls a locale-dependent
        function while another thread changes the locale while the function in
        the other thread is still running.  Proper coding would call for
        locks to prevent changing the locale while locale-dependent code is
        running.  The check here is done in case someone does not think about
        doing this.

        Only other possible issue is if someone changed the timezone and did
        not call tz.tzset .  That is an issue for the programmer, though,
        since changing the timezone is worthless without that call.

        """
        self.lang = _getlang()
        self.__calc_weekday()
        self.__calc_month()
        self.__calc_am_pm()
        self.__calc_timezone()
        self.__calc_date_time()
        if _getlang() != self.lang:
            raise ValueError("locale changed during initialization")

    def __pad(self, seq, front):
        # Add '' to seq to either the front (is True), else the back.
        seq = list(seq)
        if front:
            seq.insert(0, '')
        else:
            seq.append('')
        return seq

    def __calc_weekday(self):
        # Set self.a_weekday and self.f_weekday using the calendar
        # module.
        a_weekday = [calendar.day_abbr[i].lower() for i in range(7)]
        f_weekday = [calendar.day_name[i].lower() for i in range(7)]
        self.a_weekday = a_weekday
        self.f_weekday = f_weekday

    def __calc_month(self):
        # Set self.f_month and self.a_month using the calendar module.
        a_month = [calendar.month_abbr[i].lower() for i in range(13)]
        f_month = [calendar.month_name[i].lower() for i in range(13)]
        self.a_month = a_month
        self.f_month = f_month

    def __calc_am_pm(self):
        # Set self.am_pm by using time.strftime().

        # The magic date (1999,3,17,hour,44,55,2,76,0) is not really that
        # magical; just happened to have used it everywhere else where a
        # static date was needed.
        am_pm = []
        for hour in (01,22):
            time_tuple = time.struct_time((1999,3,17,hour,44,55,2,76,0))
            am_pm.append(time.strftime("%p", time_tuple).lower())
        self.am_pm = am_pm

    def __calc_date_time(self):
        # Set self.date_time, self.date, & self.time by using
        # time.strftime().

        # Use (1999,3,17,22,44,55,2,76,0) for magic date because the amount of
        # overloaded numbers is minimized.  The order in which searches for
        # values within the format string is very important; it eliminates
        # possible ambiguity for what something represents.
        time_tuple = time.struct_time((1999,3,17,22,44,55,2,76,0))
        date_time = [None, None, None]
        date_time[0] = time.strftime("%c", time_tuple).lower()
        date_time[1] = time.strftime("%x", time_tuple).lower()
        date_time[2] = time.strftime("%X", time_tuple).lower()
        replacement_pairs = [('%', '%%'), (self.f_weekday[2], '%A'),
                    (self.f_month[3], '%B'), (self.a_weekday[2], '%a'),
                    (self.a_month[3], '%b'), (self.am_pm[1], '%p'),
                    ('1999', '%Y'), ('99', '%y'), ('22', '%H'),
                    ('44', '%M'), ('55', '%S'), ('76', '%j'),
                    ('17', '%d'), ('03', '%m'), ('3', '%m'),
                    # '3' needed for when no leading zero.
                    ('2', '%w'), ('10', '%I')]
        replacement_pairs.extend([(tz, "%Z") for tz_values in self.timezone
                                                for tz in tz_values])
        for offset,directive in ((0,'%c'), (1,'%x'), (2,'%X')):
            current_format = date_time[offset]
            for old, new in replacement_pairs:
                # Must deal with possible lack of locale info
                # manifesting itself as the empty string (e.g., Swedish's
                # lack of AM/PM info) or a platform returning a tuple of empty
                # strings (e.g., MacOS 9 having timezone as ('','')).
                if old:
                    current_format = current_format.replace(old, new)
            # If %W is used, then Sunday, 2005-01-03 will fall on week 0 since
            # 2005-01-03 occurs before the first Monday of the year.  Otherwise
            # %U is used.
            time_tuple = time.struct_time((1999,1,3,1,1,1,6,3,0))
            if '00' in time.strftime(directive, time_tuple):
                U_W = '%W'
            else:
                U_W = '%U'
            date_time[offset] = current_format.replace('11', U_W)
        self.LC_date_time = date_time[0]
        self.LC_date = date_time[1]
        self.LC_time = date_time[2]

    def __calc_timezone(self):
        # Set self.timezone by using time.tzname.
        # Do not worry about possibility of time.tzname[0] == time.tzname[1]
        # and time.daylight; handle that in strptime.
        try:
            time.tzset()
        except AttributeError:
            pass
        no_saving = frozenset(["utc", "gmt", time.tzname[0].lower()])
        if time.daylight:
            has_saving = frozenset([time.tzname[1].lower()])
        else:
            has_saving = frozenset()
        self.timezone = (no_saving, has_saving)
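A quick look at what LocaleTime discovers (not part of this commit; the output shown assumes the C locale, other locales differ):

lt = LocaleTime()
print lt.a_weekday   # ['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun']
print lt.LC_date     # e.g. '%m/%d/%y': the locale's %x rebuilt as directives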
class TimeRE(dict):
    """Handle conversion from format directives to regexes."""

    def __init__(self, locale_time=None):
        """Create keys/values.

        Order of execution is important for dependency reasons.

        """
        if locale_time:
            self.locale_time = locale_time
        else:
            self.locale_time = LocaleTime()
        base = super(TimeRE, self)
        base.__init__({
            # The " \d" part of the regex is to make %c from ANSI C work
            'd': r"(?P<d>3[0-1]|[1-2]\d|0[1-9]|[1-9]| [1-9])",
            'f': r"(?P<f>[0-9]{1,6})",
            'H': r"(?P<H>2[0-3]|[0-1]\d|\d)",
            'I': r"(?P<I>1[0-2]|0[1-9]|[1-9])",
            'j': r"(?P<j>36[0-6]|3[0-5]\d|[1-2]\d\d|0[1-9]\d|00[1-9]|[1-9]\d|0[1-9]|[1-9])",
            'm': r"(?P<m>1[0-2]|0[1-9]|[1-9])",
            'M': r"(?P<M>[0-5]\d|\d)",
            'S': r"(?P<S>6[0-1]|[0-5]\d|\d)",
            'U': r"(?P<U>5[0-3]|[0-4]\d|\d)",
            'w': r"(?P<w>[0-6])",
            # W is set below by using 'U'
            'y': r"(?P<y>\d\d)",
            #XXX: Does 'Y' need to worry about having less or more than
            #     4 digits?
            'Y': r"(?P<Y>\d\d\d\d)",
            'A': self.__seqToRE(self.locale_time.f_weekday, 'A'),
            'a': self.__seqToRE(self.locale_time.a_weekday, 'a'),
            'B': self.__seqToRE(self.locale_time.f_month[1:], 'B'),
            'b': self.__seqToRE(self.locale_time.a_month[1:], 'b'),
            'p': self.__seqToRE(self.locale_time.am_pm, 'p'),
            'Z': self.__seqToRE((tz for tz_names in self.locale_time.timezone
                                        for tz in tz_names),
                                'Z'),
            '%': '%'})
        base.__setitem__('W', base.__getitem__('U').replace('U', 'W'))
        base.__setitem__('c', self.pattern(self.locale_time.LC_date_time))
        base.__setitem__('x', self.pattern(self.locale_time.LC_date))
        base.__setitem__('X', self.pattern(self.locale_time.LC_time))

    def __seqToRE(self, to_convert, directive):
        """Convert a list to a regex string for matching a directive.

        Want possible matching values to be from longest to shortest.  This
        prevents the possibility of a match occurring for a value that is also
        a substring of a larger value that should have matched (e.g., 'abc'
        matching when 'abcdef' should have been the match).

        """
        to_convert = sorted(to_convert, key=len, reverse=True)
        for value in to_convert:
            if value != '':
                break
        else:
            return ''
        regex = '|'.join(re_escape(stuff) for stuff in to_convert)
        regex = '(?P<%s>%s' % (directive, regex)
        return '%s)' % regex

    def pattern(self, format):
        """Return regex pattern for the format string.

        Need to make sure that any characters that might be interpreted as
        regex syntax are escaped.

        """
        processed_format = ''
        # The sub() call escapes all characters that might be misconstrued
        # as regex syntax.  Cannot use re.escape since we have to deal with
        # format directives (%m, etc.).
        regex_chars = re_compile(r"([\\.^$*+?\(\){}\[\]|])")
        format = regex_chars.sub(r"\\\1", format)
        whitespace_replacement = re_compile('\s+')
        format = whitespace_replacement.sub('\s+', format)
        while '%' in format:
            directive_index = format.index('%')+1
            processed_format = "%s%s%s" % (processed_format,
                                           format[:directive_index-1],
                                           self[format[directive_index]])
            format = format[directive_index+1:]
        return "%s%s" % (processed_format, format)

    def compile(self, format):
        """Return a compiled re object for the format string."""
        return re_compile(self.pattern(format), IGNORECASE)
||||
|
||||
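# Illustration (not part of the original module): each directive becomes a
# named regex group, so a whole format string compiles into one pattern.
# Assuming this module is importable as _strptime:
#
#   >>> from _strptime import TimeRE
#   >>> regex = TimeRE().compile("%Y-%m-%d")
#   >>> regex.match("2014-07-09").group('Y', 'm', 'd')
#   ('2014', '07', '09')
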
_cache_lock = _thread_allocate_lock()
# DO NOT modify _TimeRE_cache or _regex_cache without acquiring the cache lock
# first!
_TimeRE_cache = TimeRE()
_CACHE_MAX_SIZE = 5     # Max number of regexes stored in _regex_cache
_regex_cache = {}

def _calc_julian_from_U_or_W(year, week_of_year, day_of_week, week_starts_Mon):
    """Calculate the Julian day based on the year, week of the year, and day of
    the week, with week_starts_Mon representing whether the week of the year
    assumes the week starts on Sunday or Monday (6 or 0)."""
    first_weekday = datetime_date(year, 1, 1).weekday()
    # If we are dealing with the %U directive (week starts on Sunday), it's
    # easier to just shift the view to Sunday being the first day of the
    # week.
    if not week_starts_Mon:
        first_weekday = (first_weekday + 1) % 7
        day_of_week = (day_of_week + 1) % 7
    # Need to watch out for a week 0 (when the first day of the year is not
    # the same as that specified by %U or %W).
    week_0_length = (7 - first_weekday) % 7
    if week_of_year == 0:
        return 1 + day_of_week - first_weekday
    else:
        days_to_week = week_0_length + (7 * (week_of_year - 1))
        return 1 + days_to_week + day_of_week


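# Illustration (not part of the original module): for 1999, Jan 1 falls on a
# Friday, so the Monday-based week 0 holds three days and the Wednesday
# (day_of_week=2) of week 1 is Julian day 6, i.e. 1999-01-06:
#
#   >>> _calc_julian_from_U_or_W(1999, 1, 2, True)
#   6
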
def _strptime(data_string, format="%a %b %d %H:%M:%S %Y"):
    """Return a time struct based on the input string and the format string."""
    global _TimeRE_cache, _regex_cache
    with _cache_lock:
        if _getlang() != _TimeRE_cache.locale_time.lang:
            _TimeRE_cache = TimeRE()
            _regex_cache.clear()
        if len(_regex_cache) > _CACHE_MAX_SIZE:
            _regex_cache.clear()
        locale_time = _TimeRE_cache.locale_time
        format_regex = _regex_cache.get(format)
        if not format_regex:
            try:
                format_regex = _TimeRE_cache.compile(format)
            # KeyError raised when a bad format is found; can be specified as
            # \\, in which case it was a stray % but with a space after it
            except KeyError, err:
                bad_directive = err.args[0]
                if bad_directive == "\\":
                    bad_directive = "%"
                del err
                raise ValueError("'%s' is a bad directive in format '%s'" %
                                    (bad_directive, format))
            # IndexError only occurs when the format string is "%"
            except IndexError:
                raise ValueError("stray %% in format '%s'" % format)
            _regex_cache[format] = format_regex
    found = format_regex.match(data_string)
    if not found:
        raise ValueError("time data %r does not match format %r" %
                         (data_string, format))
    if len(data_string) != found.end():
        raise ValueError("unconverted data remains: %s" %
                          data_string[found.end():])

    year = None
    month = day = 1
    hour = minute = second = fraction = 0
    tz = -1
    # Default to -1 to signify that values not known; not critical to have,
    # though
    week_of_year = -1
    week_of_year_start = -1
    # weekday and julian defaulted to -1 so as to signal need to calculate
    # values
    weekday = julian = -1
    found_dict = found.groupdict()
    for group_key in found_dict.iterkeys():
        # Directives not explicitly handled below:
        #   c, x, X
        #      handled by making out of other directives
        #   U, W
        #      worthless without day of the week
        if group_key == 'y':
            year = int(found_dict['y'])
            # Open Group specification for strptime() states that a %y
            #value in the range of [00, 68] is in the century 2000, while
            #[69,99] is in the century 1900
            if year <= 68:
                year += 2000
            else:
                year += 1900
        elif group_key == 'Y':
            year = int(found_dict['Y'])
        elif group_key == 'm':
            month = int(found_dict['m'])
        elif group_key == 'B':
            month = locale_time.f_month.index(found_dict['B'].lower())
        elif group_key == 'b':
            month = locale_time.a_month.index(found_dict['b'].lower())
        elif group_key == 'd':
            day = int(found_dict['d'])
        elif group_key == 'H':
            hour = int(found_dict['H'])
        elif group_key == 'I':
            hour = int(found_dict['I'])
            ampm = found_dict.get('p', '').lower()
            # If there was no AM/PM indicator, we'll treat this like AM
            if ampm in ('', locale_time.am_pm[0]):
                # We're in AM so the hour is correct unless we're
                # looking at 12 midnight.
                # 12 midnight == 12 AM == hour 0
                if hour == 12:
                    hour = 0
            elif ampm == locale_time.am_pm[1]:
                # We're in PM so we need to add 12 to the hour unless
                # we're looking at 12 noon.
                # 12 noon == 12 PM == hour 12
                if hour != 12:
                    hour += 12
        elif group_key == 'M':
            minute = int(found_dict['M'])
        elif group_key == 'S':
            second = int(found_dict['S'])
        elif group_key == 'f':
            s = found_dict['f']
            # Pad to always return microseconds.
            s += "0" * (6 - len(s))
            fraction = int(s)
        elif group_key == 'A':
            weekday = locale_time.f_weekday.index(found_dict['A'].lower())
        elif group_key == 'a':
            weekday = locale_time.a_weekday.index(found_dict['a'].lower())
        elif group_key == 'w':
            weekday = int(found_dict['w'])
            if weekday == 0:
                weekday = 6
            else:
                weekday -= 1
        elif group_key == 'j':
            julian = int(found_dict['j'])
        elif group_key in ('U', 'W'):
            week_of_year = int(found_dict[group_key])
            if group_key == 'U':
                # U starts week on Sunday.
                week_of_year_start = 6
            else:
                # W starts week on Monday.
                week_of_year_start = 0
        elif group_key == 'Z':
            # Since -1 is default value only need to worry about setting tz if
            # it can be something other than -1.
            found_zone = found_dict['Z'].lower()
            for value, tz_values in enumerate(locale_time.timezone):
                if found_zone in tz_values:
                    # Deal with bad locale setup where timezone names are the
                    # same and yet time.daylight is true; too ambiguous to
                    # be able to tell what timezone has daylight savings
                    if (time.tzname[0] == time.tzname[1] and
                       time.daylight and found_zone not in ("utc", "gmt")):
                        break
                    else:
                        tz = value
                        break
    leap_year_fix = False
    if year is None and month == 2 and day == 29:
        year = 1904  # 1904 is first leap year of 20th century
        leap_year_fix = True
    elif year is None:
        year = 1900
    # If we know the week of the year and what day of that week, we can figure
    # out the Julian day of the year.
    if julian == -1 and week_of_year != -1 and weekday != -1:
        week_starts_Mon = True if week_of_year_start == 0 else False
        julian = _calc_julian_from_U_or_W(year, week_of_year, weekday,
                                            week_starts_Mon)
    # Cannot pre-calculate datetime_date() since can change in Julian
    # calculation and thus could have different value for the day of the week
    # calculation.
    if julian == -1:
        # Need to add 1 to result since first day of the year is 1, not 0.
        julian = datetime_date(year, month, day).toordinal() - \
                  datetime_date(year, 1, 1).toordinal() + 1
    else:  # Assume that if they bothered to include Julian day it will
           # be accurate.
        datetime_result = datetime_date.fromordinal((julian - 1) + datetime_date(year, 1, 1).toordinal())
        year = datetime_result.year
        month = datetime_result.month
        day = datetime_result.day
    if weekday == -1:
        weekday = datetime_date(year, month, day).weekday()
    if leap_year_fix:
        # the caller didn't supply a year but asked for Feb 29th. We couldn't
        # use the default of 1900 for computations. We set it back to ensure
        # that February 29th is smaller than March 1st.
        year = 1900

    return (time.struct_time((year, month, day,
                              hour, minute, second,
                              weekday, julian, tz)), fraction)

def _strptime_time(data_string, format="%a %b %d %H:%M:%S %Y"):
    return _strptime(data_string, format)[0]
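End to end, _strptime returns a struct_time plus the parsed microsecond
fraction. A minimal usage sketch, assuming this module is importable as
_strptime from the PythonHome tree:

    from _strptime import _strptime

    parsed, fraction = _strptime("17/03/1999 22:44:55.5", "%d/%m/%Y %H:%M:%S.%f")
    print parsed.tm_year, parsed.tm_mon, parsed.tm_mday   # 1999 3 17
    print fraction                                        # 500000 (microseconds)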
Binary file not shown.
251
PythonHome/Lib/_threading_local.py
Normal file
@ -0,0 +1,251 @@
"""Thread-local objects.

(Note that this module provides a Python version of the threading.local
 class.  Depending on the version of Python you're using, there may be a
 faster one available.  You should always import the `local` class from
 `threading`.)

Thread-local objects support the management of thread-local data.
If you have data that you want to be local to a thread, simply create
a thread-local object and use its attributes:

  >>> mydata = local()
  >>> mydata.number = 42
  >>> mydata.number
  42

You can also access the local-object's dictionary:

  >>> mydata.__dict__
  {'number': 42}
  >>> mydata.__dict__.setdefault('widgets', [])
  []
  >>> mydata.widgets
  []

What's important about thread-local objects is that their data are
local to a thread. If we access the data in a different thread:

  >>> log = []
  >>> def f():
  ...     items = mydata.__dict__.items()
  ...     items.sort()
  ...     log.append(items)
  ...     mydata.number = 11
  ...     log.append(mydata.number)

  >>> import threading
  >>> thread = threading.Thread(target=f)
  >>> thread.start()
  >>> thread.join()
  >>> log
  [[], 11]

we get different data.  Furthermore, changes made in the other thread
don't affect data seen in this thread:

  >>> mydata.number
  42

Of course, values you get from a local object, including a __dict__
attribute, are for whatever thread was current at the time the
attribute was read.  For that reason, you generally don't want to save
these values across threads, as they apply only to the thread they
came from.

You can create custom local objects by subclassing the local class:

  >>> class MyLocal(local):
  ...     number = 2
  ...     initialized = False
  ...     def __init__(self, **kw):
  ...         if self.initialized:
  ...             raise SystemError('__init__ called too many times')
  ...         self.initialized = True
  ...         self.__dict__.update(kw)
  ...     def squared(self):
  ...         return self.number ** 2

This can be useful to support default values, methods and
initialization.  Note that if you define an __init__ method, it will be
called each time the local object is used in a separate thread.  This
is necessary to initialize each thread's dictionary.

Now if we create a local object:

  >>> mydata = MyLocal(color='red')

Now we have a default number:

  >>> mydata.number
  2

an initial color:

  >>> mydata.color
  'red'
  >>> del mydata.color

And a method that operates on the data:

  >>> mydata.squared()
  4

As before, we can access the data in a separate thread:

  >>> log = []
  >>> thread = threading.Thread(target=f)
  >>> thread.start()
  >>> thread.join()
  >>> log
  [[('color', 'red'), ('initialized', True)], 11]

without affecting this thread's data:

  >>> mydata.number
  2
  >>> mydata.color
  Traceback (most recent call last):
  ...
  AttributeError: 'MyLocal' object has no attribute 'color'

Note that subclasses can define slots, but they are not thread
local.  They are shared across threads:

  >>> class MyLocal(local):
  ...     __slots__ = 'number'

  >>> mydata = MyLocal()
  >>> mydata.number = 42
  >>> mydata.color = 'red'

So, the separate thread:

  >>> thread = threading.Thread(target=f)
  >>> thread.start()
  >>> thread.join()

affects what we see:

  >>> mydata.number
  11

>>> del mydata
"""

__all__ = ["local"]

# We need to use objects from the threading module, but the threading
# module may also want to use our `local` class, if support for locals
# isn't compiled in to the `thread` module.  This creates potential problems
# with circular imports.  For that reason, we don't import `threading`
# until the bottom of this file (a hack sufficient to worm around the
# potential problems).  Note that almost all platforms do have support for
# locals in the `thread` module, and there is no circular import problem
# then, so problems introduced by fiddling the order of imports here won't
# manifest on most boxes.

class _localbase(object):
    __slots__ = '_local__key', '_local__args', '_local__lock'

    def __new__(cls, *args, **kw):
        self = object.__new__(cls)
        key = '_local__key', 'thread.local.' + str(id(self))
        object.__setattr__(self, '_local__key', key)
        object.__setattr__(self, '_local__args', (args, kw))
        object.__setattr__(self, '_local__lock', RLock())

        if (args or kw) and (cls.__init__ is object.__init__):
            raise TypeError("Initialization arguments are not supported")

        # We need to create the thread dict in anticipation of
        # __init__ being called, to make sure we don't call it
        # again ourselves.
        dict = object.__getattribute__(self, '__dict__')
        current_thread().__dict__[key] = dict

        return self

def _patch(self):
    key = object.__getattribute__(self, '_local__key')
    d = current_thread().__dict__.get(key)
    if d is None:
        d = {}
        current_thread().__dict__[key] = d
        object.__setattr__(self, '__dict__', d)

        # we have a new instance dict, so call out __init__ if we have
        # one
        cls = type(self)
        if cls.__init__ is not object.__init__:
            args, kw = object.__getattribute__(self, '_local__args')
            cls.__init__(self, *args, **kw)
    else:
        object.__setattr__(self, '__dict__', d)

class local(_localbase):

    def __getattribute__(self, name):
        lock = object.__getattribute__(self, '_local__lock')
        lock.acquire()
        try:
            _patch(self)
            return object.__getattribute__(self, name)
        finally:
            lock.release()

    def __setattr__(self, name, value):
        if name == '__dict__':
            raise AttributeError(
                "%r object attribute '__dict__' is read-only"
                % self.__class__.__name__)
        lock = object.__getattribute__(self, '_local__lock')
        lock.acquire()
        try:
            _patch(self)
            return object.__setattr__(self, name, value)
        finally:
            lock.release()

    def __delattr__(self, name):
        if name == '__dict__':
            raise AttributeError(
                "%r object attribute '__dict__' is read-only"
                % self.__class__.__name__)
        lock = object.__getattribute__(self, '_local__lock')
        lock.acquire()
        try:
            _patch(self)
            return object.__delattr__(self, name)
        finally:
            lock.release()

    def __del__(self):
        import threading

        key = object.__getattribute__(self, '_local__key')

        try:
            # We use the non-locking API since we might already hold the lock
            # (__del__ can be called at any point by the cyclic GC).
            threads = threading._enumerate()
        except:
            # If enumerating the current threads fails, as it seems to do
            # during shutdown, we'll skip cleanup under the assumption
            # that there is nothing to clean up.
            return

        for thread in threads:
            try:
                __dict__ = thread.__dict__
            except AttributeError:
                # Thread is dying, rest in peace.
                continue

            if key in __dict__:
                try:
                    del __dict__[key]
                except KeyError:
                    pass # didn't have anything in this thread

from threading import current_thread, RLock
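A minimal sketch of the behavior the docstring describes, assuming this
module is importable as _threading_local: every attribute access funnels
through _patch(), so each thread sees its own __dict__.

    from _threading_local import local
    import threading

    counter = local()
    counter.value = 0              # stored in the main thread's dict

    def worker():
        counter.value = 99         # _patch() swaps in a fresh dict here
        print 'worker sees', counter.value   # 99

    t = threading.Thread(target=worker)
    t.start()
    t.join()
    print 'main sees', counter.value         # still 0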
Binary file not shown.
204
PythonHome/Lib/_weakrefset.py
Normal file
@ -0,0 +1,204 @@
# Access WeakSet through the weakref module.
# This code is separated-out because it is needed
# by abc.py to load everything else at startup.

from _weakref import ref

__all__ = ['WeakSet']


class _IterationGuard(object):
    # This context manager registers itself in the current iterators of the
    # weak container, such as to delay all removals until the context manager
    # exits.
    # This technique should be relatively thread-safe (since sets are).

    def __init__(self, weakcontainer):
        # Don't create cycles
        self.weakcontainer = ref(weakcontainer)

    def __enter__(self):
        w = self.weakcontainer()
        if w is not None:
            w._iterating.add(self)
        return self

    def __exit__(self, e, t, b):
        w = self.weakcontainer()
        if w is not None:
            s = w._iterating
            s.remove(self)
            if not s:
                w._commit_removals()


class WeakSet(object):
    def __init__(self, data=None):
        self.data = set()
        def _remove(item, selfref=ref(self)):
            self = selfref()
            if self is not None:
                if self._iterating:
                    self._pending_removals.append(item)
                else:
                    self.data.discard(item)
        self._remove = _remove
        # A list of keys to be removed
        self._pending_removals = []
        self._iterating = set()
        if data is not None:
            self.update(data)

    def _commit_removals(self):
        l = self._pending_removals
        discard = self.data.discard
        while l:
            discard(l.pop())

    def __iter__(self):
        with _IterationGuard(self):
            for itemref in self.data:
                item = itemref()
                if item is not None:
                    # Caveat: the iterator will keep a strong reference to
                    # `item` until it is resumed or closed.
                    yield item

    def __len__(self):
        return len(self.data) - len(self._pending_removals)

    def __contains__(self, item):
        try:
            wr = ref(item)
        except TypeError:
            return False
        return wr in self.data

    def __reduce__(self):
        return (self.__class__, (list(self),),
                getattr(self, '__dict__', None))

    __hash__ = None

    def add(self, item):
        if self._pending_removals:
            self._commit_removals()
        self.data.add(ref(item, self._remove))

    def clear(self):
        if self._pending_removals:
            self._commit_removals()
        self.data.clear()

    def copy(self):
        return self.__class__(self)

    def pop(self):
        if self._pending_removals:
            self._commit_removals()
        while True:
            try:
                itemref = self.data.pop()
            except KeyError:
                raise KeyError('pop from empty WeakSet')
            item = itemref()
            if item is not None:
                return item

    def remove(self, item):
        if self._pending_removals:
            self._commit_removals()
        self.data.remove(ref(item))

    def discard(self, item):
        if self._pending_removals:
            self._commit_removals()
        self.data.discard(ref(item))

    def update(self, other):
        if self._pending_removals:
            self._commit_removals()
        for element in other:
            self.add(element)

    def __ior__(self, other):
        self.update(other)
        return self

    def difference(self, other):
        newset = self.copy()
        newset.difference_update(other)
        return newset
    __sub__ = difference

    def difference_update(self, other):
        self.__isub__(other)
    def __isub__(self, other):
        if self._pending_removals:
            self._commit_removals()
        if self is other:
            self.data.clear()
        else:
            self.data.difference_update(ref(item) for item in other)
        return self

    def intersection(self, other):
        return self.__class__(item for item in other if item in self)
    __and__ = intersection

    def intersection_update(self, other):
        self.__iand__(other)
    def __iand__(self, other):
        if self._pending_removals:
            self._commit_removals()
        self.data.intersection_update(ref(item) for item in other)
        return self

    def issubset(self, other):
        return self.data.issubset(ref(item) for item in other)
    __le__ = issubset

    def __lt__(self, other):
        return self.data < set(ref(item) for item in other)

    def issuperset(self, other):
        return self.data.issuperset(ref(item) for item in other)
    __ge__ = issuperset

    def __gt__(self, other):
        return self.data > set(ref(item) for item in other)

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return NotImplemented
        return self.data == set(ref(item) for item in other)

    def __ne__(self, other):
        opposite = self.__eq__(other)
        if opposite is NotImplemented:
            return NotImplemented
        return not opposite

    def symmetric_difference(self, other):
        newset = self.copy()
        newset.symmetric_difference_update(other)
        return newset
    __xor__ = symmetric_difference

    def symmetric_difference_update(self, other):
        self.__ixor__(other)
    def __ixor__(self, other):
        if self._pending_removals:
            self._commit_removals()
        if self is other:
            self.data.clear()
        else:
            self.data.symmetric_difference_update(ref(item, self._remove) for item in other)
        return self

    def union(self, other):
        return self.__class__(e for s in (self, other) for e in s)
    __or__ = union

    def isdisjoint(self, other):
        return len(self.intersection(other)) == 0
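A minimal sketch of the weak-discard behavior, assuming this module is
importable as _weakrefset: once the last strong reference to an element
dies, the ref callback built in __init__ removes it from the set.

    from _weakrefset import WeakSet
    import gc

    class Token(object):
        pass

    t = Token()
    s = WeakSet([t])
    print len(s)    # 1
    del t           # drop the only strong reference
    gc.collect()    # immediate on CPython anyway; kept for portability
    print len(s)    # 0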
Binary file not shown.
185
PythonHome/Lib/abc.py
Normal file
@ -0,0 +1,185 @@
# Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.

"""Abstract Base Classes (ABCs) according to PEP 3119."""

import types

from _weakrefset import WeakSet

# Instance of old-style class
class _C: pass
_InstanceType = type(_C())


def abstractmethod(funcobj):
    """A decorator indicating abstract methods.

    Requires that the metaclass is ABCMeta or derived from it.  A
    class that has a metaclass derived from ABCMeta cannot be
    instantiated unless all of its abstract methods are overridden.
    The abstract methods can be called using any of the normal
    'super' call mechanisms.

    Usage:

        class C:
            __metaclass__ = ABCMeta
            @abstractmethod
            def my_abstract_method(self, ...):
                ...
    """
    funcobj.__isabstractmethod__ = True
    return funcobj


class abstractproperty(property):
    """A decorator indicating abstract properties.

    Requires that the metaclass is ABCMeta or derived from it.  A
    class that has a metaclass derived from ABCMeta cannot be
    instantiated unless all of its abstract properties are overridden.
    The abstract properties can be called using any of the normal
    'super' call mechanisms.

    Usage:

        class C:
            __metaclass__ = ABCMeta
            @abstractproperty
            def my_abstract_property(self):
                ...

    This defines a read-only property; you can also define a read-write
    abstract property using the 'long' form of property declaration:

        class C:
            __metaclass__ = ABCMeta
            def getx(self): ...
            def setx(self, value): ...
            x = abstractproperty(getx, setx)
    """
    __isabstractmethod__ = True


class ABCMeta(type):

    """Metaclass for defining Abstract Base Classes (ABCs).

    Use this metaclass to create an ABC.  An ABC can be subclassed
    directly, and then acts as a mix-in class.  You can also register
    unrelated concrete classes (even built-in classes) and unrelated
    ABCs as 'virtual subclasses' -- these and their descendants will
    be considered subclasses of the registering ABC by the built-in
    issubclass() function, but the registering ABC won't show up in
    their MRO (Method Resolution Order) nor will method
    implementations defined by the registering ABC be callable (not
    even via super()).

    """

    # A global counter that is incremented each time a class is
    # registered as a virtual subclass of anything.  It forces the
    # negative cache to be cleared before its next use.
    _abc_invalidation_counter = 0

    def __new__(mcls, name, bases, namespace):
        cls = super(ABCMeta, mcls).__new__(mcls, name, bases, namespace)
        # Compute set of abstract method names
        abstracts = set(name
                     for name, value in namespace.items()
                     if getattr(value, "__isabstractmethod__", False))
        for base in bases:
            for name in getattr(base, "__abstractmethods__", set()):
                value = getattr(cls, name, None)
                if getattr(value, "__isabstractmethod__", False):
                    abstracts.add(name)
        cls.__abstractmethods__ = frozenset(abstracts)
        # Set up inheritance registry
        cls._abc_registry = WeakSet()
        cls._abc_cache = WeakSet()
        cls._abc_negative_cache = WeakSet()
        cls._abc_negative_cache_version = ABCMeta._abc_invalidation_counter
        return cls

    def register(cls, subclass):
        """Register a virtual subclass of an ABC."""
        if not isinstance(subclass, (type, types.ClassType)):
            raise TypeError("Can only register classes")
        if issubclass(subclass, cls):
            return  # Already a subclass
        # Subtle: test for cycles *after* testing for "already a subclass";
        # this means we allow X.register(X) and interpret it as a no-op.
        if issubclass(cls, subclass):
            # This would create a cycle, which is bad for the algorithm below
            raise RuntimeError("Refusing to create an inheritance cycle")
        cls._abc_registry.add(subclass)
        ABCMeta._abc_invalidation_counter += 1  # Invalidate negative cache

    def _dump_registry(cls, file=None):
        """Debug helper to print the ABC registry."""
        print >> file, "Class: %s.%s" % (cls.__module__, cls.__name__)
        print >> file, "Inv.counter: %s" % ABCMeta._abc_invalidation_counter
        for name in sorted(cls.__dict__.keys()):
            if name.startswith("_abc_"):
                value = getattr(cls, name)
                print >> file, "%s: %r" % (name, value)

    def __instancecheck__(cls, instance):
        """Override for isinstance(instance, cls)."""
        # Inline the cache checking when it's simple.
        subclass = getattr(instance, '__class__', None)
        if subclass is not None and subclass in cls._abc_cache:
            return True
        subtype = type(instance)
        # Old-style instances
        if subtype is _InstanceType:
            subtype = subclass
        if subtype is subclass or subclass is None:
            if (cls._abc_negative_cache_version ==
                ABCMeta._abc_invalidation_counter and
                subtype in cls._abc_negative_cache):
                return False
            # Fall back to the subclass check.
            return cls.__subclasscheck__(subtype)
        return (cls.__subclasscheck__(subclass) or
                cls.__subclasscheck__(subtype))

    def __subclasscheck__(cls, subclass):
        """Override for issubclass(subclass, cls)."""
        # Check cache
        if subclass in cls._abc_cache:
            return True
        # Check negative cache; may have to invalidate
        if cls._abc_negative_cache_version < ABCMeta._abc_invalidation_counter:
            # Invalidate the negative cache
            cls._abc_negative_cache = WeakSet()
            cls._abc_negative_cache_version = ABCMeta._abc_invalidation_counter
        elif subclass in cls._abc_negative_cache:
            return False
        # Check the subclass hook
        ok = cls.__subclasshook__(subclass)
        if ok is not NotImplemented:
            assert isinstance(ok, bool)
            if ok:
                cls._abc_cache.add(subclass)
            else:
                cls._abc_negative_cache.add(subclass)
            return ok
        # Check if it's a direct subclass
        if cls in getattr(subclass, '__mro__', ()):
            cls._abc_cache.add(subclass)
            return True
        # Check if it's a subclass of a registered class (recursive)
        for rcls in cls._abc_registry:
            if issubclass(subclass, rcls):
                cls._abc_cache.add(subclass)
                return True
        # Check if it's a subclass of a subclass (recursive)
        for scls in cls.__subclasses__():
            if issubclass(subclass, scls):
                cls._abc_cache.add(subclass)
                return True
        # No dice; update negative cache
        cls._abc_negative_cache.add(subclass)
        return False
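A minimal sketch of virtual subclassing with the ABCMeta above: register()
records the class in _abc_registry, which __subclasscheck__ walks, without
ever touching the registered class's MRO.

    from abc import ABCMeta, abstractmethod

    class Sized:
        __metaclass__ = ABCMeta

        @abstractmethod
        def __len__(self):
            pass

    Sized.register(tuple)            # virtual subclass, no inheritance
    print issubclass(tuple, Sized)   # True -- found via _abc_registry
    print Sized in tuple.__mro__     # False -- the MRO is untouched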
Binary file not shown.
986
PythonHome/Lib/aifc.py
Normal file
@ -0,0 +1,986 @@
"""Stuff to parse AIFF-C and AIFF files.

Unless explicitly stated otherwise, the description below is true
both for AIFF-C files and AIFF files.

An AIFF-C file has the following structure.

  +-----------------+
  | FORM            |
  +-----------------+
  | <size>          |
  +----+------------+
  |    | AIFC       |
  |    +------------+
  |    | <chunks>   |
  |    |    .       |
  |    |    .       |
  |    |    .       |
  +----+------------+

An AIFF file has the string "AIFF" instead of "AIFC".

A chunk consists of an identifier (4 bytes) followed by a size (4 bytes,
big endian order), followed by the data.  The size field does not include
the size of the 8 byte header.

The following chunk types are recognized.

  FVER
      <version number of AIFF-C defining document> (AIFF-C only).
  MARK
      <# of markers> (2 bytes)
      list of markers:
          <marker ID> (2 bytes, must be > 0)
          <position> (4 bytes)
          <marker name> ("pstring")
  COMM
      <# of channels> (2 bytes)
      <# of sound frames> (4 bytes)
      <size of the samples> (2 bytes)
      <sampling frequency> (10 bytes, IEEE 80-bit extended
          floating point)
      in AIFF-C files only:
          <compression type> (4 bytes)
          <human-readable version of compression type> ("pstring")
  SSND
      <offset> (4 bytes, not used by this program)
      <blocksize> (4 bytes, not used by this program)
      <sound data>

A pstring consists of 1 byte length, a string of characters, and 0 or 1
byte pad to make the total length even.

Usage.

Reading AIFF files:
  f = aifc.open(file, 'r')
where file is either the name of a file or an open file pointer.
The open file pointer must have methods read(), seek(), and close().
In some types of audio files, if the setpos() method is not used,
the seek() method is not necessary.

This returns an instance of a class with the following public methods:
  getnchannels()  -- returns number of audio channels (1 for
                     mono, 2 for stereo)
  getsampwidth()  -- returns sample width in bytes
  getframerate()  -- returns sampling frequency
  getnframes()    -- returns number of audio frames
  getcomptype()   -- returns compression type ('NONE' for AIFF files)
  getcompname()   -- returns human-readable version of
                     compression type ('not compressed' for AIFF files)
  getparams()     -- returns a tuple consisting of all of the
                     above in the above order
  getmarkers()    -- get the list of marks in the audio file or None
                     if there are no marks
  getmark(id)     -- get mark with the specified id (raises an error
                     if the mark does not exist)
  readframes(n)   -- returns at most n frames of audio
  rewind()        -- rewind to the beginning of the audio stream
  setpos(pos)     -- seek to the specified position
  tell()          -- return the current position
  close()         -- close the instance (make it unusable)
The position returned by tell(), the position given to setpos() and
the position of marks are all compatible and have nothing to do with
the actual position in the file.
The close() method is called automatically when the class instance
is destroyed.

Writing AIFF files:
  f = aifc.open(file, 'w')
where file is either the name of a file or an open file pointer.
The open file pointer must have methods write(), tell(), seek(), and
close().

This returns an instance of a class with the following public methods:
  aiff()          -- create an AIFF file (AIFF-C default)
  aifc()          -- create an AIFF-C file
  setnchannels(n) -- set the number of channels
  setsampwidth(n) -- set the sample width
  setframerate(n) -- set the frame rate
  setnframes(n)   -- set the number of frames
  setcomptype(type, name)
                  -- set the compression type and the
                     human-readable compression type
  setparams(tuple)
                  -- set all parameters at once
  setmark(id, pos, name)
                  -- add specified mark to the list of marks
  tell()          -- return current position in output file (useful
                     in combination with setmark())
  writeframesraw(data)
                  -- write audio frames without patching up the
                     file header
  writeframes(data)
                  -- write audio frames and patch up the file header
  close()         -- patch up the file header and close the
                     output file
You should set the parameters before the first writeframesraw or
writeframes.  The total number of frames does not need to be set,
but when it is set to the correct value, the header does not have to
be patched up.
It is best to first set all parameters, possibly including the
compression type, and then write audio frames using writeframesraw.
When all frames have been written, either call writeframes('') or
close() to patch up the sizes in the header.
Marks can be added anytime.  If there are any marks, you must call
close() after all frames have been written.
The close() method is called automatically when the class instance
is destroyed.

When a file is opened with the extension '.aiff', an AIFF file is
written, otherwise an AIFF-C file is written.  This default can be
changed by calling aiff() or aifc() before the first writeframes or
writeframesraw.
"""

import struct
import __builtin__

__all__ = ["Error","open","openfp"]

class Error(Exception):
    pass

_AIFC_version = 0xA2805140L     # Version 1 of AIFF-C

def _read_long(file):
    try:
        return struct.unpack('>l', file.read(4))[0]
    except struct.error:
        raise EOFError

def _read_ulong(file):
    try:
        return struct.unpack('>L', file.read(4))[0]
    except struct.error:
        raise EOFError

def _read_short(file):
    try:
        return struct.unpack('>h', file.read(2))[0]
    except struct.error:
        raise EOFError

def _read_ushort(file):
    try:
        return struct.unpack('>H', file.read(2))[0]
    except struct.error:
        raise EOFError

def _read_string(file):
    length = ord(file.read(1))
    if length == 0:
        data = ''
    else:
        data = file.read(length)
    if length & 1 == 0:
        dummy = file.read(1)
    return data

_HUGE_VAL = 1.79769313486231e+308  # See <limits.h>

def _read_float(f):  # 10 bytes
    expon = _read_short(f)  # 2 bytes
    sign = 1
    if expon < 0:
        sign = -1
        expon = expon + 0x8000
    himant = _read_ulong(f)  # 4 bytes
    lomant = _read_ulong(f)  # 4 bytes
    if expon == himant == lomant == 0:
        f = 0.0
    elif expon == 0x7FFF:
        f = _HUGE_VAL
    else:
        expon = expon - 16383
        f = (himant * 0x100000000L + lomant) * pow(2.0, expon - 63)
    return sign * f

def _write_short(f, x):
    f.write(struct.pack('>h', x))

def _write_ushort(f, x):
    f.write(struct.pack('>H', x))

def _write_long(f, x):
    f.write(struct.pack('>l', x))

def _write_ulong(f, x):
    f.write(struct.pack('>L', x))

def _write_string(f, s):
    if len(s) > 255:
        raise ValueError("string exceeds maximum pstring length")
    f.write(struct.pack('B', len(s)))
    f.write(s)
    if len(s) & 1 == 0:
        f.write(chr(0))

def _write_float(f, x):
    import math
    if x < 0:
        sign = 0x8000
        x = x * -1
    else:
        sign = 0
    if x == 0:
        expon = 0
        himant = 0
        lomant = 0
    else:
        fmant, expon = math.frexp(x)
        if expon > 16384 or fmant >= 1 or fmant != fmant:  # Infinity or NaN
            expon = sign|0x7FFF
            himant = 0
            lomant = 0
        else:                   # Finite
            expon = expon + 16382
            if expon < 0:       # denormalized
                fmant = math.ldexp(fmant, expon)
                expon = 0
            expon = expon | sign
            fmant = math.ldexp(fmant, 32)
            fsmant = math.floor(fmant)
            himant = long(fsmant)
            fmant = math.ldexp(fmant - fsmant, 32)
            fsmant = math.floor(fmant)
            lomant = long(fsmant)
    _write_ushort(f, expon)
    _write_ulong(f, himant)
    _write_ulong(f, lomant)

from chunk import Chunk

class Aifc_read:
    # Variables used in this class:
    #
    # These variables are available to the user through appropriate
    # methods of this class:
    # _file -- the open file with methods read(), close(), and seek()
    #       set through the __init__() method
    # _nchannels -- the number of audio channels
    #       available through the getnchannels() method
    # _nframes -- the number of audio frames
    #       available through the getnframes() method
    # _sampwidth -- the number of bytes per audio sample
    #       available through the getsampwidth() method
    # _framerate -- the sampling frequency
    #       available through the getframerate() method
    # _comptype -- the AIFF-C compression type ('NONE' if AIFF)
    #       available through the getcomptype() method
    # _compname -- the human-readable AIFF-C compression type
    #       available through the getcomptype() method
    # _markers -- the marks in the audio file
    #       available through the getmarkers() and getmark()
    #       methods
    # _soundpos -- the position in the audio stream
    #       available through the tell() method, set through the
    #       setpos() method
    #
    # These variables are used internally only:
    # _version -- the AIFF-C version number
    # _decomp -- the decompressor from builtin module cl
    # _comm_chunk_read -- 1 iff the COMM chunk has been read
    # _aifc -- 1 iff reading an AIFF-C file
    # _ssnd_seek_needed -- 1 iff positioned correctly in audio
    #       file for readframes()
    # _ssnd_chunk -- instantiation of a chunk class for the SSND chunk
    # _framesize -- size of one frame in the file

    def initfp(self, file):
        self._version = 0
        self._decomp = None
        self._convert = None
        self._markers = []
        self._soundpos = 0
        self._file = file
        chunk = Chunk(file)
        if chunk.getname() != 'FORM':
            raise Error, 'file does not start with FORM id'
        formdata = chunk.read(4)
        if formdata == 'AIFF':
            self._aifc = 0
        elif formdata == 'AIFC':
            self._aifc = 1
        else:
            raise Error, 'not an AIFF or AIFF-C file'
        self._comm_chunk_read = 0
        while 1:
            self._ssnd_seek_needed = 1
            try:
                chunk = Chunk(self._file)
            except EOFError:
                break
            chunkname = chunk.getname()
            if chunkname == 'COMM':
                self._read_comm_chunk(chunk)
                self._comm_chunk_read = 1
            elif chunkname == 'SSND':
                self._ssnd_chunk = chunk
                dummy = chunk.read(8)
                self._ssnd_seek_needed = 0
            elif chunkname == 'FVER':
                self._version = _read_ulong(chunk)
            elif chunkname == 'MARK':
                self._readmark(chunk)
            chunk.skip()
        if not self._comm_chunk_read or not self._ssnd_chunk:
            raise Error, 'COMM chunk and/or SSND chunk missing'
        if self._aifc and self._decomp:
            import cl
            params = [cl.ORIGINAL_FORMAT, 0,
                      cl.BITS_PER_COMPONENT, self._sampwidth * 8,
                      cl.FRAME_RATE, self._framerate]
            if self._nchannels == 1:
                params[1] = cl.MONO
            elif self._nchannels == 2:
                params[1] = cl.STEREO_INTERLEAVED
            else:
                raise Error, 'cannot compress more than 2 channels'
            self._decomp.SetParams(params)

    def __init__(self, f):
        if type(f) == type(''):
            f = __builtin__.open(f, 'rb')
        # else, assume it is an open file object already
        self.initfp(f)

    #
    # User visible methods.
    #
    def getfp(self):
        return self._file

    def rewind(self):
        self._ssnd_seek_needed = 1
        self._soundpos = 0

    def close(self):
        if self._decomp:
            self._decomp.CloseDecompressor()
            self._decomp = None
        self._file.close()

    def tell(self):
        return self._soundpos

    def getnchannels(self):
        return self._nchannels

    def getnframes(self):
        return self._nframes

    def getsampwidth(self):
        return self._sampwidth

    def getframerate(self):
        return self._framerate

    def getcomptype(self):
        return self._comptype

    def getcompname(self):
        return self._compname

##  def getversion(self):
##      return self._version

    def getparams(self):
        return self.getnchannels(), self.getsampwidth(), \
              self.getframerate(), self.getnframes(), \
              self.getcomptype(), self.getcompname()

    def getmarkers(self):
        if len(self._markers) == 0:
            return None
        return self._markers

    def getmark(self, id):
        for marker in self._markers:
            if id == marker[0]:
                return marker
        raise Error, 'marker %r does not exist' % (id,)

    def setpos(self, pos):
        if pos < 0 or pos > self._nframes:
            raise Error, 'position not in range'
        self._soundpos = pos
        self._ssnd_seek_needed = 1

    def readframes(self, nframes):
        if self._ssnd_seek_needed:
            self._ssnd_chunk.seek(0)
            dummy = self._ssnd_chunk.read(8)
            pos = self._soundpos * self._framesize
            if pos:
                self._ssnd_chunk.seek(pos + 8)
            self._ssnd_seek_needed = 0
        if nframes == 0:
            return ''
        data = self._ssnd_chunk.read(nframes * self._framesize)
        if self._convert and data:
            data = self._convert(data)
        self._soundpos = self._soundpos + len(data) // (self._nchannels * self._sampwidth)
        return data

    #
    # Internal methods.
    #

    def _decomp_data(self, data):
        import cl
        dummy = self._decomp.SetParam(cl.FRAME_BUFFER_SIZE,
                                      len(data) * 2)
        return self._decomp.Decompress(len(data) // self._nchannels,
                                       data)

    def _ulaw2lin(self, data):
        import audioop
        return audioop.ulaw2lin(data, 2)

    def _adpcm2lin(self, data):
        import audioop
        if not hasattr(self, '_adpcmstate'):
            # first time
            self._adpcmstate = None
        data, self._adpcmstate = audioop.adpcm2lin(data, 2,
                                                   self._adpcmstate)
        return data

    def _read_comm_chunk(self, chunk):
        self._nchannels = _read_short(chunk)
        self._nframes = _read_long(chunk)
        self._sampwidth = (_read_short(chunk) + 7) // 8
        self._framerate = int(_read_float(chunk))
        self._framesize = self._nchannels * self._sampwidth
        if self._aifc:
            #DEBUG: SGI's soundeditor produces a bad size :-(
            kludge = 0
            if chunk.chunksize == 18:
                kludge = 1
                print 'Warning: bad COMM chunk size'
                chunk.chunksize = 23
            #DEBUG end
            self._comptype = chunk.read(4)
            #DEBUG start
            if kludge:
                length = ord(chunk.file.read(1))
                if length & 1 == 0:
                    length = length + 1
                chunk.chunksize = chunk.chunksize + length
                chunk.file.seek(-1, 1)
            #DEBUG end
            self._compname = _read_string(chunk)
            if self._comptype != 'NONE':
                if self._comptype == 'G722':
                    try:
                        import audioop
                    except ImportError:
                        pass
                    else:
                        self._convert = self._adpcm2lin
                        self._sampwidth = 2
                        return
                # for ULAW and ALAW try Compression Library
                try:
                    import cl
                except ImportError:
                    if self._comptype in ('ULAW', 'ulaw'):
                        try:
                            import audioop
                            self._convert = self._ulaw2lin
                            self._sampwidth = 2
                            return
                        except ImportError:
                            pass
                    raise Error, 'cannot read compressed AIFF-C files'
                if self._comptype in ('ULAW', 'ulaw'):
                    scheme = cl.G711_ULAW
                elif self._comptype in ('ALAW', 'alaw'):
                    scheme = cl.G711_ALAW
                else:
                    raise Error, 'unsupported compression type'
                self._decomp = cl.OpenDecompressor(scheme)
                self._convert = self._decomp_data
                self._sampwidth = 2
        else:
            self._comptype = 'NONE'
            self._compname = 'not compressed'

    def _readmark(self, chunk):
        nmarkers = _read_short(chunk)
        # Some files appear to contain invalid counts.
        # Cope with this by testing for EOF.
        try:
            for i in range(nmarkers):
                id = _read_short(chunk)
                pos = _read_long(chunk)
                name = _read_string(chunk)
                if pos or name:
                    # some files appear to have
                    # dummy markers consisting of
                    # a position 0 and name ''
                    self._markers.append((id, pos, name))
        except EOFError:
            print 'Warning: MARK chunk contains only',
            print len(self._markers),
            if len(self._markers) == 1: print 'marker',
            else: print 'markers',
            print 'instead of', nmarkers

class Aifc_write:
    # Variables used in this class:
    #
    # These variables are user settable through appropriate methods
    # of this class:
    # _file -- the open file with methods write(), close(), tell(), seek()
    #       set through the __init__() method
    # _comptype -- the AIFF-C compression type ('NONE' in AIFF)
    #       set through the setcomptype() or setparams() method
    # _compname -- the human-readable AIFF-C compression type
    #       set through the setcomptype() or setparams() method
    # _nchannels -- the number of audio channels
    #       set through the setnchannels() or setparams() method
    # _sampwidth -- the number of bytes per audio sample
    #       set through the setsampwidth() or setparams() method
    # _framerate -- the sampling frequency
    #       set through the setframerate() or setparams() method
    # _nframes -- the number of audio frames written to the header
    #       set through the setnframes() or setparams() method
    # _aifc -- whether we're writing an AIFF-C file or an AIFF file
    #       set through the aifc() method, reset through the
    #       aiff() method
    #
    # These variables are used internally only:
    # _version -- the AIFF-C version number
    # _comp -- the compressor from builtin module cl
    # _nframeswritten -- the number of audio frames actually written
    # _datalength -- the size of the audio samples written to the header
    # _datawritten -- the size of the audio samples actually written

    def __init__(self, f):
        if type(f) == type(''):
            filename = f
            f = __builtin__.open(f, 'wb')
        else:
            # else, assume it is an open file object already
            filename = '???'
        self.initfp(f)
        if filename[-5:] == '.aiff':
            self._aifc = 0
        else:
            self._aifc = 1

    def initfp(self, file):
        self._file = file
        self._version = _AIFC_version
        self._comptype = 'NONE'
        self._compname = 'not compressed'
        self._comp = None
        self._convert = None
        self._nchannels = 0
        self._sampwidth = 0
        self._framerate = 0
        self._nframes = 0
        self._nframeswritten = 0
        self._datawritten = 0
        self._datalength = 0
        self._markers = []
        self._marklength = 0
        self._aifc = 1      # AIFF-C is default

    def __del__(self):
        if self._file:
            self.close()

    #
    # User visible methods.
    #
    def aiff(self):
        if self._nframeswritten:
            raise Error, 'cannot change parameters after starting to write'
        self._aifc = 0

    def aifc(self):
        if self._nframeswritten:
            raise Error, 'cannot change parameters after starting to write'
        self._aifc = 1

    def setnchannels(self, nchannels):
        if self._nframeswritten:
            raise Error, 'cannot change parameters after starting to write'
        if nchannels < 1:
            raise Error, 'bad # of channels'
        self._nchannels = nchannels

    def getnchannels(self):
        if not self._nchannels:
            raise Error, 'number of channels not set'
        return self._nchannels

    def setsampwidth(self, sampwidth):
        if self._nframeswritten:
            raise Error, 'cannot change parameters after starting to write'
        if sampwidth < 1 or sampwidth > 4:
            raise Error, 'bad sample width'
        self._sampwidth = sampwidth

    def getsampwidth(self):
        if not self._sampwidth:
            raise Error, 'sample width not set'
        return self._sampwidth

    def setframerate(self, framerate):
        if self._nframeswritten:
            raise Error, 'cannot change parameters after starting to write'
        if framerate <= 0:
            raise Error, 'bad frame rate'
        self._framerate = framerate

    def getframerate(self):
        if not self._framerate:
            raise Error, 'frame rate not set'
        return self._framerate

    def setnframes(self, nframes):
        if self._nframeswritten:
            raise Error, 'cannot change parameters after starting to write'
        self._nframes = nframes

    def getnframes(self):
        return self._nframeswritten

    def setcomptype(self, comptype, compname):
        if self._nframeswritten:
            raise Error, 'cannot change parameters after starting to write'
        if comptype not in ('NONE', 'ULAW', 'ulaw', 'ALAW', 'alaw', 'G722'):
            raise Error, 'unsupported compression type'
        self._comptype = comptype
        self._compname = compname

    def getcomptype(self):
        return self._comptype

    def getcompname(self):
        return self._compname

##  def setversion(self, version):
##      if self._nframeswritten:
##          raise Error, 'cannot change parameters after starting to write'
##      self._version = version

    def setparams(self, info):
        nchannels, sampwidth, framerate, nframes, comptype, compname = info
        if self._nframeswritten:
            raise Error, 'cannot change parameters after starting to write'
        if comptype not in ('NONE', 'ULAW', 'ulaw', 'ALAW', 'alaw', 'G722'):
            raise Error, 'unsupported compression type'
        self.setnchannels(nchannels)
        self.setsampwidth(sampwidth)
        self.setframerate(framerate)
        self.setnframes(nframes)
        self.setcomptype(comptype, compname)

    def getparams(self):
        if not self._nchannels or not self._sampwidth or not self._framerate:
            raise Error, 'not all parameters set'
        return self._nchannels, self._sampwidth, self._framerate, \
              self._nframes, self._comptype, self._compname

    def setmark(self, id, pos, name):
        if id <= 0:
            raise Error, 'marker ID must be > 0'
        if pos < 0:
            raise Error, 'marker position must be >= 0'
        if type(name) != type(''):
            raise Error, 'marker name must be a string'
        for i in range(len(self._markers)):
            if id == self._markers[i][0]:
                self._markers[i] = id, pos, name
                return
        self._markers.append((id, pos, name))

    def getmark(self, id):
        for marker in self._markers:
            if id == marker[0]:
                return marker
        raise Error, 'marker %r does not exist' % (id,)

    def getmarkers(self):
        if len(self._markers) == 0:
            return None
        return self._markers

    def tell(self):
        return self._nframeswritten

    def writeframesraw(self, data):
        self._ensure_header_written(len(data))
        nframes = len(data) // (self._sampwidth * self._nchannels)
        if self._convert:
            data = self._convert(data)
        self._file.write(data)
        self._nframeswritten = self._nframeswritten + nframes
        self._datawritten = self._datawritten + len(data)

    def writeframes(self, data):
        self.writeframesraw(data)
        if self._nframeswritten != self._nframes or \
              self._datalength != self._datawritten:
            self._patchheader()

    def close(self):
        if self._file is None:
            return
        try:
            self._ensure_header_written(0)
            if self._datawritten & 1:
                # quick pad to even size
                self._file.write(chr(0))
                self._datawritten = self._datawritten + 1
            self._writemarkers()
            if self._nframeswritten != self._nframes or \
                  self._datalength != self._datawritten or \
                  self._marklength:
                self._patchheader()
            if self._comp:
                self._comp.CloseCompressor()
                self._comp = None
        finally:
            # Prevent ref cycles
            self._convert = None
            f = self._file
            self._file = None
            f.close()

    #
    # Internal methods.
    #

    def _comp_data(self, data):
        import cl
        dummy = self._comp.SetParam(cl.FRAME_BUFFER_SIZE, len(data))
        dummy = self._comp.SetParam(cl.COMPRESSED_BUFFER_SIZE, len(data))
        return self._comp.Compress(self._nframes, data)

    def _lin2ulaw(self, data):
        import audioop
        return audioop.lin2ulaw(data, 2)

    def _lin2adpcm(self, data):
        import audioop
        if not hasattr(self, '_adpcmstate'):
            self._adpcmstate = None
        data, self._adpcmstate = audioop.lin2adpcm(data, 2,
                                                   self._adpcmstate)
        return data

    def _ensure_header_written(self, datasize):
        if not self._nframeswritten:
            if self._comptype in ('ULAW', 'ulaw', 'ALAW', 'alaw'):
                if not self._sampwidth:
                    self._sampwidth = 2
                if self._sampwidth != 2:
                    raise Error, 'sample width must be 2 when compressing with ULAW or ALAW'
            if self._comptype == 'G722':
                if not self._sampwidth:
                    self._sampwidth = 2
                if self._sampwidth != 2:
                    raise Error, 'sample width must be 2 when compressing with G7.22 (ADPCM)'
            if not self._nchannels:
                raise Error, '# channels not specified'
            if not self._sampwidth:
                raise Error, 'sample width not specified'
            if not self._framerate:
                raise Error, 'sampling rate not specified'
            self._write_header(datasize)

    def _init_compression(self):
        if self._comptype == 'G722':
            self._convert = self._lin2adpcm
            return
        try:
            import cl
        except ImportError:
            if self._comptype in ('ULAW', 'ulaw'):
                try:
                    import audioop
                    self._convert = self._lin2ulaw
                    return
                except ImportError:
                    pass
            raise Error, 'cannot write compressed AIFF-C files'
        if self._comptype in ('ULAW', 'ulaw'):
            scheme = cl.G711_ULAW
        elif self._comptype in ('ALAW', 'alaw'):
            scheme = cl.G711_ALAW
        else:
            raise Error, 'unsupported compression type'
        self._comp = cl.OpenCompressor(scheme)
        params = [cl.ORIGINAL_FORMAT, 0,
                  cl.BITS_PER_COMPONENT, self._sampwidth * 8,
                  cl.FRAME_RATE, self._framerate,
                  cl.FRAME_BUFFER_SIZE, 100,
                  cl.COMPRESSED_BUFFER_SIZE, 100]
        if self._nchannels == 1:
            params[1] = cl.MONO
        elif self._nchannels == 2:
            params[1] = cl.STEREO_INTERLEAVED
        else:
            raise Error, 'cannot compress more than 2 channels'
        self._comp.SetParams(params)
        # the compressor produces a header which we ignore
        dummy = self._comp.Compress(0, '')
||||
self._convert = self._comp_data
|
||||
|
||||
def _write_header(self, initlength):
|
||||
if self._aifc and self._comptype != 'NONE':
|
||||
self._init_compression()
|
||||
self._file.write('FORM')
|
||||
if not self._nframes:
|
||||
self._nframes = initlength // (self._nchannels * self._sampwidth)
|
||||
self._datalength = self._nframes * self._nchannels * self._sampwidth
|
||||
if self._datalength & 1:
|
||||
self._datalength = self._datalength + 1
|
||||
if self._aifc:
|
||||
if self._comptype in ('ULAW', 'ulaw', 'ALAW', 'alaw'):
|
||||
self._datalength = self._datalength // 2
|
||||
if self._datalength & 1:
|
||||
self._datalength = self._datalength + 1
|
||||
elif self._comptype == 'G722':
|
||||
self._datalength = (self._datalength + 3) // 4
|
||||
if self._datalength & 1:
|
||||
self._datalength = self._datalength + 1
|
||||
try:
|
||||
self._form_length_pos = self._file.tell()
|
||||
except (AttributeError, IOError):
|
||||
self._form_length_pos = None
|
||||
commlength = self._write_form_length(self._datalength)
|
||||
if self._aifc:
|
||||
self._file.write('AIFC')
|
||||
self._file.write('FVER')
|
||||
_write_ulong(self._file, 4)
|
||||
_write_ulong(self._file, self._version)
|
||||
else:
|
||||
self._file.write('AIFF')
|
||||
self._file.write('COMM')
|
||||
_write_ulong(self._file, commlength)
|
||||
_write_short(self._file, self._nchannels)
|
||||
if self._form_length_pos is not None:
|
||||
self._nframes_pos = self._file.tell()
|
||||
_write_ulong(self._file, self._nframes)
|
||||
if self._comptype in ('ULAW', 'ulaw', 'ALAW', 'alaw', 'G722'):
|
||||
_write_short(self._file, 8)
|
||||
else:
|
||||
_write_short(self._file, self._sampwidth * 8)
|
||||
_write_float(self._file, self._framerate)
|
||||
if self._aifc:
|
||||
self._file.write(self._comptype)
|
||||
_write_string(self._file, self._compname)
|
||||
self._file.write('SSND')
|
||||
if self._form_length_pos is not None:
|
||||
self._ssnd_length_pos = self._file.tell()
|
||||
_write_ulong(self._file, self._datalength + 8)
|
||||
_write_ulong(self._file, 0)
|
||||
_write_ulong(self._file, 0)
|
||||
|
||||
def _write_form_length(self, datalength):
|
||||
if self._aifc:
|
||||
commlength = 18 + 5 + len(self._compname)
|
||||
if commlength & 1:
|
||||
commlength = commlength + 1
|
||||
verslength = 12
|
||||
else:
|
||||
commlength = 18
|
||||
verslength = 0
|
||||
_write_ulong(self._file, 4 + verslength + self._marklength + \
|
||||
8 + commlength + 16 + datalength)
|
||||
return commlength
|
||||
|
||||
def _patchheader(self):
|
||||
curpos = self._file.tell()
|
||||
if self._datawritten & 1:
|
||||
datalength = self._datawritten + 1
|
||||
self._file.write(chr(0))
|
||||
else:
|
||||
datalength = self._datawritten
|
||||
if datalength == self._datalength and \
|
||||
self._nframes == self._nframeswritten and \
|
||||
self._marklength == 0:
|
||||
self._file.seek(curpos, 0)
|
||||
return
|
||||
self._file.seek(self._form_length_pos, 0)
|
||||
dummy = self._write_form_length(datalength)
|
||||
self._file.seek(self._nframes_pos, 0)
|
||||
_write_ulong(self._file, self._nframeswritten)
|
||||
self._file.seek(self._ssnd_length_pos, 0)
|
||||
_write_ulong(self._file, datalength + 8)
|
||||
self._file.seek(curpos, 0)
|
||||
self._nframes = self._nframeswritten
|
||||
self._datalength = datalength
|
||||
|
||||
def _writemarkers(self):
|
||||
if len(self._markers) == 0:
|
||||
return
|
||||
self._file.write('MARK')
|
||||
length = 2
|
||||
for marker in self._markers:
|
||||
id, pos, name = marker
|
||||
length = length + len(name) + 1 + 6
|
||||
if len(name) & 1 == 0:
|
||||
length = length + 1
|
||||
_write_ulong(self._file, length)
|
||||
self._marklength = length + 8
|
||||
_write_short(self._file, len(self._markers))
|
||||
for marker in self._markers:
|
||||
id, pos, name = marker
|
||||
_write_short(self._file, id)
|
||||
_write_ulong(self._file, pos)
|
||||
_write_string(self._file, name)
|
||||
|
||||
def open(f, mode=None):
|
||||
if mode is None:
|
||||
if hasattr(f, 'mode'):
|
||||
mode = f.mode
|
||||
else:
|
||||
mode = 'rb'
|
||||
if mode in ('r', 'rb'):
|
||||
return Aifc_read(f)
|
||||
elif mode in ('w', 'wb'):
|
||||
return Aifc_write(f)
|
||||
else:
|
||||
raise Error, "mode must be 'r', 'rb', 'w', or 'wb'"
|
||||
|
||||
openfp = open # B/W compatibility
|
||||
|
||||
if __name__ == '__main__':
|
||||
import sys
|
||||
if not sys.argv[1:]:
|
||||
sys.argv.append('/usr/demos/data/audio/bach.aiff')
|
||||
fn = sys.argv[1]
|
||||
f = open(fn, 'r')
|
||||
try:
|
||||
print "Reading", fn
|
||||
print "nchannels =", f.getnchannels()
|
||||
print "nframes =", f.getnframes()
|
||||
print "sampwidth =", f.getsampwidth()
|
||||
print "framerate =", f.getframerate()
|
||||
print "comptype =", f.getcomptype()
|
||||
print "compname =", f.getcompname()
|
||||
if sys.argv[2:]:
|
||||
gn = sys.argv[2]
|
||||
print "Writing", gn
|
||||
g = open(gn, 'w')
|
||||
try:
|
||||
g.setparams(f.getparams())
|
||||
while 1:
|
||||
data = f.readframes(1024)
|
||||
if not data:
|
||||
break
|
||||
g.writeframes(data)
|
||||
finally:
|
||||
g.close()
|
||||
print "Done."
|
||||
finally:
|
||||
f.close()
|
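A minimal usage sketch for the Aifc_write path added above; the output path 'tone.aiff' and the silent sample buffer are hypothetical, everything else uses only the methods shown:

    import aifc

    frames = '\x00\x00' * 8000        # hypothetical: one second of 16-bit mono silence
    g = aifc.open('tone.aiff', 'w')   # hypothetical output path
    g.setnchannels(1)                 # parameters must be set before the first write
    g.setsampwidth(2)
    g.setframerate(8000)
    g.writeframes(frames)             # writes the header, then patches it if counts changed
    g.close()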
Binary file not shown.

4 PythonHome/Lib/antigravity.py Normal file
@@ -0,0 +1,4 @@
import webbrowser

webbrowser.open("http://xkcd.com/353/")
Binary file not shown.
Binary file not shown.

2367 PythonHome/Lib/argparse.py Normal file
File diff suppressed because it is too large.

Binary file not shown.

311 PythonHome/Lib/ast.py Normal file
@@ -0,0 +1,311 @@
# -*- coding: utf-8 -*-
"""
    ast
    ~~~

    The `ast` module helps Python applications to process trees of the Python
    abstract syntax grammar.  The abstract syntax itself might change with
    each Python release; this module helps to find out programmatically what
    the current grammar looks like and allows modifications of it.

    An abstract syntax tree can be generated by passing `ast.PyCF_ONLY_AST` as
    a flag to the `compile()` builtin function or by using the `parse()`
    function from this module.  The result will be a tree of objects whose
    classes all inherit from `ast.AST`.

    A modified abstract syntax tree can be compiled into a Python code object
    using the built-in `compile()` function.

    Additionally various helper functions are provided that make working with
    the trees simpler.  The main intention of the helper functions and this
    module in general is to provide an easy to use interface for libraries
    that work tightly with the python syntax (template engines for example).


    :copyright: Copyright 2008 by Armin Ronacher.
    :license: Python License.
"""
from _ast import *
from _ast import __version__


def parse(source, filename='<unknown>', mode='exec'):
    """
    Parse the source into an AST node.
    Equivalent to compile(source, filename, mode, PyCF_ONLY_AST).
    """
    return compile(source, filename, mode, PyCF_ONLY_AST)


def literal_eval(node_or_string):
    """
    Safely evaluate an expression node or a string containing a Python
    expression.  The string or node provided may only consist of the following
    Python literal structures: strings, numbers, tuples, lists, dicts, booleans,
    and None.
    """
    _safe_names = {'None': None, 'True': True, 'False': False}
    if isinstance(node_or_string, basestring):
        node_or_string = parse(node_or_string, mode='eval')
    if isinstance(node_or_string, Expression):
        node_or_string = node_or_string.body
    def _convert(node):
        if isinstance(node, Str):
            return node.s
        elif isinstance(node, Num):
            return node.n
        elif isinstance(node, Tuple):
            return tuple(map(_convert, node.elts))
        elif isinstance(node, List):
            return list(map(_convert, node.elts))
        elif isinstance(node, Dict):
            return dict((_convert(k), _convert(v)) for k, v
                        in zip(node.keys, node.values))
        elif isinstance(node, Name):
            if node.id in _safe_names:
                return _safe_names[node.id]
        elif isinstance(node, BinOp) and \
             isinstance(node.op, (Add, Sub)) and \
             isinstance(node.right, Num) and \
             isinstance(node.right.n, complex) and \
             isinstance(node.left, Num) and \
             isinstance(node.left.n, (int, long, float)):
            left = node.left.n
            right = node.right.n
            if isinstance(node.op, Add):
                return left + right
            else:
                return left - right
        raise ValueError('malformed string')
    return _convert(node_or_string)


def dump(node, annotate_fields=True, include_attributes=False):
    """
    Return a formatted dump of the tree in *node*.  This is mainly useful for
    debugging purposes.  The returned string will show the names and the values
    for fields.  This makes the code impossible to evaluate, so if evaluation is
    wanted *annotate_fields* must be set to False.  Attributes such as line
    numbers and column offsets are not dumped by default.  If this is wanted,
    *include_attributes* can be set to True.
    """
    def _format(node):
        if isinstance(node, AST):
            fields = [(a, _format(b)) for a, b in iter_fields(node)]
            rv = '%s(%s' % (node.__class__.__name__, ', '.join(
                ('%s=%s' % field for field in fields)
                if annotate_fields else
                (b for a, b in fields)
            ))
            if include_attributes and node._attributes:
                rv += fields and ', ' or ' '
                rv += ', '.join('%s=%s' % (a, _format(getattr(node, a)))
                                for a in node._attributes)
            return rv + ')'
        elif isinstance(node, list):
            return '[%s]' % ', '.join(_format(x) for x in node)
        return repr(node)
    if not isinstance(node, AST):
        raise TypeError('expected AST, got %r' % node.__class__.__name__)
    return _format(node)


def copy_location(new_node, old_node):
    """
    Copy source location (`lineno` and `col_offset` attributes) from
    *old_node* to *new_node* if possible, and return *new_node*.
    """
    for attr in 'lineno', 'col_offset':
        if attr in old_node._attributes and attr in new_node._attributes \
           and hasattr(old_node, attr):
            setattr(new_node, attr, getattr(old_node, attr))
    return new_node


def fix_missing_locations(node):
    """
    When you compile a node tree with compile(), the compiler expects lineno and
    col_offset attributes for every node that supports them.  This is rather
    tedious to fill in for generated nodes, so this helper adds these attributes
    recursively where not already set, by setting them to the values of the
    parent node.  It works recursively starting at *node*.
    """
    def _fix(node, lineno, col_offset):
        if 'lineno' in node._attributes:
            if not hasattr(node, 'lineno'):
                node.lineno = lineno
            else:
                lineno = node.lineno
        if 'col_offset' in node._attributes:
            if not hasattr(node, 'col_offset'):
                node.col_offset = col_offset
            else:
                col_offset = node.col_offset
        for child in iter_child_nodes(node):
            _fix(child, lineno, col_offset)
    _fix(node, 1, 0)
    return node


def increment_lineno(node, n=1):
    """
    Increment the line number of each node in the tree starting at *node* by *n*.
    This is useful to "move code" to a different location in a file.
    """
    for child in walk(node):
        if 'lineno' in child._attributes:
            child.lineno = getattr(child, 'lineno', 0) + n
    return node


def iter_fields(node):
    """
    Yield a tuple of ``(fieldname, value)`` for each field in ``node._fields``
    that is present on *node*.
    """
    for field in node._fields:
        try:
            yield field, getattr(node, field)
        except AttributeError:
            pass


def iter_child_nodes(node):
    """
    Yield all direct child nodes of *node*, that is, all fields that are nodes
    and all items of fields that are lists of nodes.
    """
    for name, field in iter_fields(node):
        if isinstance(field, AST):
            yield field
        elif isinstance(field, list):
            for item in field:
                if isinstance(item, AST):
                    yield item


def get_docstring(node, clean=True):
    """
    Return the docstring for the given node or None if no docstring can
    be found.  If the node provided does not have docstrings a TypeError
    will be raised.
    """
    if not isinstance(node, (FunctionDef, ClassDef, Module)):
        raise TypeError("%r can't have docstrings" % node.__class__.__name__)
    if node.body and isinstance(node.body[0], Expr) and \
       isinstance(node.body[0].value, Str):
        if clean:
            import inspect
            return inspect.cleandoc(node.body[0].value.s)
        return node.body[0].value.s


def walk(node):
    """
    Recursively yield all descendant nodes in the tree starting at *node*
    (including *node* itself), in no specified order.  This is useful if you
    only want to modify nodes in place and don't care about the context.
    """
    from collections import deque
    todo = deque([node])
    while todo:
        node = todo.popleft()
        todo.extend(iter_child_nodes(node))
        yield node


class NodeVisitor(object):
    """
    A node visitor base class that walks the abstract syntax tree and calls a
    visitor function for every node found.  This function may return a value
    which is forwarded by the `visit` method.

    This class is meant to be subclassed, with the subclass adding visitor
    methods.

    Per default the visitor functions for the nodes are ``'visit_'`` +
    class name of the node.  So a `TryFinally` node visit function would
    be `visit_TryFinally`.  This behavior can be changed by overriding
    the `visit` method.  If no visitor function exists for a node
    (return value `None`) the `generic_visit` visitor is used instead.

    Don't use the `NodeVisitor` if you want to apply changes to nodes during
    traversing.  For this a special visitor exists (`NodeTransformer`) that
    allows modifications.
    """

    def visit(self, node):
        """Visit a node."""
        method = 'visit_' + node.__class__.__name__
        visitor = getattr(self, method, self.generic_visit)
        return visitor(node)

    def generic_visit(self, node):
        """Called if no explicit visitor function exists for a node."""
        for field, value in iter_fields(node):
            if isinstance(value, list):
                for item in value:
                    if isinstance(item, AST):
                        self.visit(item)
            elif isinstance(value, AST):
                self.visit(value)


class NodeTransformer(NodeVisitor):
    """
    A :class:`NodeVisitor` subclass that walks the abstract syntax tree and
    allows modification of nodes.

    The `NodeTransformer` will walk the AST and use the return value of the
    visitor methods to replace or remove the old node.  If the return value of
    the visitor method is ``None``, the node will be removed from its location,
    otherwise it is replaced with the return value.  The return value may be the
    original node in which case no replacement takes place.

    Here is an example transformer that rewrites all occurrences of name lookups
    (``foo``) to ``data['foo']``::

       class RewriteName(NodeTransformer):

           def visit_Name(self, node):
               return copy_location(Subscript(
                   value=Name(id='data', ctx=Load()),
                   slice=Index(value=Str(s=node.id)),
                   ctx=node.ctx
               ), node)

    Keep in mind that if the node you're operating on has child nodes you must
    either transform the child nodes yourself or call the :meth:`generic_visit`
    method for the node first.

    For nodes that were part of a collection of statements (that applies to all
    statement nodes), the visitor may also return a list of nodes rather than
    just a single node.

    Usually you use the transformer like this::

       node = YourTransformer().visit(node)
    """

    def generic_visit(self, node):
        for field, old_value in iter_fields(node):
            old_value = getattr(node, field, None)
            if isinstance(old_value, list):
                new_values = []
                for value in old_value:
                    if isinstance(value, AST):
                        value = self.visit(value)
                        if value is None:
                            continue
                        elif not isinstance(value, AST):
                            new_values.extend(value)
                            continue
                    new_values.append(value)
                old_value[:] = new_values
            elif isinstance(old_value, AST):
                new_node = self.visit(old_value)
                if new_node is None:
                    delattr(node, field)
                else:
                    setattr(node, field, new_node)
        return node
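A short sketch of the two main entry points added above, literal_eval() and NodeTransformer; the AddOne transformer is a made-up example in the spirit of the RewriteName docstring, not part of the module:

    import ast

    print ast.literal_eval("{'a': (1, 2.5), 'b': None}")   # safe: literals only

    class AddOne(ast.NodeTransformer):
        # hypothetical transformer: replace every Num node with n + 1
        def visit_Num(self, node):
            return ast.copy_location(ast.Num(n=node.n + 1), node)

    tree = ast.parse("1 + 2", mode='eval')
    tree = ast.fix_missing_locations(AddOne().visit(tree))
    print eval(compile(tree, '<ast>', 'eval'))              # 2 + 3 -> 5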
Binary file not shown.

314 PythonHome/Lib/asynchat.py Normal file
@@ -0,0 +1,314 @@
# -*- Mode: Python; tab-width: 4 -*-
#       Id: asynchat.py,v 2.26 2000/09/07 22:29:26 rushing Exp
#       Author: Sam Rushing <rushing@nightmare.com>

# ======================================================================
# Copyright 1996 by Sam Rushing
#
#                         All Rights Reserved
#
# Permission to use, copy, modify, and distribute this software and
# its documentation for any purpose and without fee is hereby
# granted, provided that the above copyright notice appear in all
# copies and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of Sam
# Rushing not be used in advertising or publicity pertaining to
# distribution of the software without specific, written prior
# permission.
#
# SAM RUSHING DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
# INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN
# NO EVENT SHALL SAM RUSHING BE LIABLE FOR ANY SPECIAL, INDIRECT OR
# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
# ======================================================================

r"""A class supporting chat-style (command/response) protocols.

This class adds support for 'chat' style protocols - where one side
sends a 'command', and the other sends a response (examples would be
the common internet protocols - smtp, nntp, ftp, etc..).

The handle_read() method looks at the input stream for the current
'terminator' (usually '\r\n' for single-line responses, '\r\n.\r\n'
for multi-line output), calling self.found_terminator() on its
receipt.

for example:
Say you build an async nntp client using this class.  At the start
of the connection, you'll have self.terminator set to '\r\n', in
order to process the single-line greeting.  Just before issuing a
'LIST' command you'll set it to '\r\n.\r\n'.  The output of the LIST
command will be accumulated (using your own 'collect_incoming_data'
method) up to the terminator, and then control will be returned to
you - by calling your self.found_terminator() method.
"""

import socket
import asyncore
from collections import deque
from sys import py3kwarning
from warnings import filterwarnings, catch_warnings

class async_chat (asyncore.dispatcher):
    """This is an abstract class.  You must derive from this class, and add
    the two methods collect_incoming_data() and found_terminator()"""

    # these are overridable defaults

    ac_in_buffer_size = 4096
    ac_out_buffer_size = 4096

    def __init__ (self, sock=None, map=None):
        # for string terminator matching
        self.ac_in_buffer = ''

        # we use a list here rather than cStringIO for a few reasons...
        # del lst[:] is faster than sio.truncate(0)
        # lst = [] is faster than sio.truncate(0)
        # cStringIO will be gaining unicode support in py3k, which
        # will negatively affect the performance of bytes compared to
        # a ''.join() equivalent
        self.incoming = []

        # we toss the use of the "simple producer" and replace it with
        # a pure deque, which the original fifo was a wrapping of
        self.producer_fifo = deque()
        asyncore.dispatcher.__init__ (self, sock, map)

    def collect_incoming_data(self, data):
        raise NotImplementedError("must be implemented in subclass")

    def _collect_incoming_data(self, data):
        self.incoming.append(data)

    def _get_data(self):
        d = ''.join(self.incoming)
        del self.incoming[:]
        return d

    def found_terminator(self):
        raise NotImplementedError("must be implemented in subclass")

    def set_terminator (self, term):
        "Set the input delimiter.  Can be a fixed string of any length, an integer, or None"
        self.terminator = term

    def get_terminator (self):
        return self.terminator

    # grab some more data from the socket,
    # throw it to the collector method,
    # check for the terminator,
    # if found, transition to the next state.

    def handle_read (self):

        try:
            data = self.recv (self.ac_in_buffer_size)
        except socket.error, why:
            self.handle_error()
            return

        self.ac_in_buffer = self.ac_in_buffer + data

        # Continue to search for self.terminator in self.ac_in_buffer,
        # while calling self.collect_incoming_data.  The while loop
        # is necessary because we might read several data+terminator
        # combos with a single recv(4096).

        while self.ac_in_buffer:
            lb = len(self.ac_in_buffer)
            terminator = self.get_terminator()
            if not terminator:
                # no terminator, collect it all
                self.collect_incoming_data (self.ac_in_buffer)
                self.ac_in_buffer = ''
            elif isinstance(terminator, int) or isinstance(terminator, long):
                # numeric terminator
                n = terminator
                if lb < n:
                    self.collect_incoming_data (self.ac_in_buffer)
                    self.ac_in_buffer = ''
                    self.terminator = self.terminator - lb
                else:
                    self.collect_incoming_data (self.ac_in_buffer[:n])
                    self.ac_in_buffer = self.ac_in_buffer[n:]
                    self.terminator = 0
                    self.found_terminator()
            else:
                # 3 cases:
                # 1) end of buffer matches terminator exactly:
                #    collect data, transition
                # 2) end of buffer matches some prefix:
                #    collect data to the prefix
                # 3) end of buffer does not match any prefix:
                #    collect data
                terminator_len = len(terminator)
                index = self.ac_in_buffer.find(terminator)
                if index != -1:
                    # we found the terminator
                    if index > 0:
                        # don't bother reporting the empty string (source of subtle bugs)
                        self.collect_incoming_data (self.ac_in_buffer[:index])
                    self.ac_in_buffer = self.ac_in_buffer[index+terminator_len:]
                    # This does the Right Thing if the terminator is changed here.
                    self.found_terminator()
                else:
                    # check for a prefix of the terminator
                    index = find_prefix_at_end (self.ac_in_buffer, terminator)
                    if index:
                        if index != lb:
                            # we found a prefix, collect up to the prefix
                            self.collect_incoming_data (self.ac_in_buffer[:-index])
                            self.ac_in_buffer = self.ac_in_buffer[-index:]
                        break
                    else:
                        # no prefix, collect it all
                        self.collect_incoming_data (self.ac_in_buffer)
                        self.ac_in_buffer = ''

    def handle_write (self):
        self.initiate_send()

    def handle_close (self):
        self.close()

    def push (self, data):
        sabs = self.ac_out_buffer_size
        if len(data) > sabs:
            for i in xrange(0, len(data), sabs):
                self.producer_fifo.append(data[i:i+sabs])
        else:
            self.producer_fifo.append(data)
        self.initiate_send()

    def push_with_producer (self, producer):
        self.producer_fifo.append(producer)
        self.initiate_send()

    def readable (self):
        "predicate for inclusion in the readable for select()"
        # cannot use the old predicate, it violates the claim of the
        # set_terminator method.

        # return (len(self.ac_in_buffer) <= self.ac_in_buffer_size)
        return 1

    def writable (self):
        "predicate for inclusion in the writable for select()"
        return self.producer_fifo or (not self.connected)

    def close_when_done (self):
        "automatically close this channel once the outgoing queue is empty"
        self.producer_fifo.append(None)

    def initiate_send(self):
        while self.producer_fifo and self.connected:
            first = self.producer_fifo[0]
            # handle empty string/buffer or None entry
            if not first:
                del self.producer_fifo[0]
                if first is None:
                    self.handle_close()
                    return

            # handle classic producer behavior
            obs = self.ac_out_buffer_size
            try:
                with catch_warnings():
                    if py3kwarning:
                        filterwarnings("ignore", ".*buffer", DeprecationWarning)
                    data = buffer(first, 0, obs)
            except TypeError:
                data = first.more()
                if data:
                    self.producer_fifo.appendleft(data)
                else:
                    del self.producer_fifo[0]
                continue

            # send the data
            try:
                num_sent = self.send(data)
            except socket.error:
                self.handle_error()
                return

            if num_sent:
                if num_sent < len(data) or obs < len(first):
                    self.producer_fifo[0] = first[num_sent:]
                else:
                    del self.producer_fifo[0]
            # we tried to send some actual data
            return

    def discard_buffers (self):
        # Emergencies only!
        self.ac_in_buffer = ''
        del self.incoming[:]
        self.producer_fifo.clear()

class simple_producer:

    def __init__ (self, data, buffer_size=512):
        self.data = data
        self.buffer_size = buffer_size

    def more (self):
        if len (self.data) > self.buffer_size:
            result = self.data[:self.buffer_size]
            self.data = self.data[self.buffer_size:]
            return result
        else:
            result = self.data
            self.data = ''
            return result

class fifo:
    def __init__ (self, list=None):
        if not list:
            self.list = deque()
        else:
            self.list = deque(list)

    def __len__ (self):
        return len(self.list)

    def is_empty (self):
        return not self.list

    def first (self):
        return self.list[0]

    def push (self, data):
        self.list.append(data)

    def pop (self):
        if self.list:
            return (1, self.list.popleft())
        else:
            return (0, None)

# Given 'haystack', see if any prefix of 'needle' is at its end.  This
# assumes an exact match has already been checked.  Return the number of
# characters matched.
# for example:
# f_p_a_e ("qwerty\r", "\r\n") => 1
# f_p_a_e ("qwertydkjf", "\r\n") => 0
# f_p_a_e ("qwerty\r\n", "\r\n") => <undefined>

# this could maybe be made faster with a computed regex?
# [answer: no; circa Python-2.0, Jan 2001]
# new python:   28961/s
# old python:   18307/s
#        re:    12820/s
#     regex:    14035/s

def find_prefix_at_end (haystack, needle):
    l = len(needle) - 1
    while l and not haystack.endswith(needle[:l]):
        l -= 1
    return l
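A minimal async_chat subclass sketch matching the docstring above: it collects data until the '\r\n' terminator and prints each complete line; the host and port are hypothetical:

    import asynchat
    import asyncore
    import socket

    class LineClient(asynchat.async_chat):
        def __init__(self, host, port):
            asynchat.async_chat.__init__(self)
            self.ibuffer = []
            self.set_terminator('\r\n')      # found_terminator() fires on each full line
            self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
            self.connect((host, port))

        def collect_incoming_data(self, data):
            self.ibuffer.append(data)

        def found_terminator(self):
            print 'line:', ''.join(self.ibuffer)
            self.ibuffer = []
            self.close_when_done()

    # LineClient('127.0.0.1', 7007)          # hypothetical line-oriented server
    # asyncore.loop()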
Binary file not shown.

659 PythonHome/Lib/asyncore.py Normal file
@@ -0,0 +1,659 @@
# -*- Mode: Python -*-
#   Id: asyncore.py,v 2.51 2000/09/07 22:29:26 rushing Exp
#   Author: Sam Rushing <rushing@nightmare.com>

# ======================================================================
# Copyright 1996 by Sam Rushing
#
#                         All Rights Reserved
#
# Permission to use, copy, modify, and distribute this software and
# its documentation for any purpose and without fee is hereby
# granted, provided that the above copyright notice appear in all
# copies and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of Sam
# Rushing not be used in advertising or publicity pertaining to
# distribution of the software without specific, written prior
# permission.
#
# SAM RUSHING DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
# INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN
# NO EVENT SHALL SAM RUSHING BE LIABLE FOR ANY SPECIAL, INDIRECT OR
# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
# ======================================================================

"""Basic infrastructure for asynchronous socket service clients and servers.

There are only two ways to have a program on a single processor do "more
than one thing at a time".  Multi-threaded programming is the simplest and
most popular way to do it, but there is another very different technique,
that lets you have nearly all the advantages of multi-threading, without
actually using multiple threads. it's really only practical if your program
is largely I/O bound. If your program is CPU bound, then pre-emptive
scheduled threads are probably what you really need. Network servers are
rarely CPU-bound, however.

If your operating system supports the select() system call in its I/O
library (and nearly all do), then you can use it to juggle multiple
communication channels at once; doing other work while your I/O is taking
place in the "background."  Although this strategy can seem strange and
complex, especially at first, it is in many ways easier to understand and
control than multi-threaded programming. The module documented here solves
many of the difficult problems for you, making the task of building
sophisticated high-performance network servers and clients a snap.
"""

import select
import socket
import sys
import time
import warnings

import os
from errno import EALREADY, EINPROGRESS, EWOULDBLOCK, ECONNRESET, EINVAL, \
     ENOTCONN, ESHUTDOWN, EINTR, EISCONN, EBADF, ECONNABORTED, EPIPE, EAGAIN, \
     errorcode

_DISCONNECTED = frozenset((ECONNRESET, ENOTCONN, ESHUTDOWN, ECONNABORTED, EPIPE,
                           EBADF))

try:
    socket_map
except NameError:
    socket_map = {}

def _strerror(err):
    try:
        return os.strerror(err)
    except (ValueError, OverflowError, NameError):
        if err in errorcode:
            return errorcode[err]
        return "Unknown error %s" %err

class ExitNow(Exception):
    pass

_reraised_exceptions = (ExitNow, KeyboardInterrupt, SystemExit)

def read(obj):
    try:
        obj.handle_read_event()
    except _reraised_exceptions:
        raise
    except:
        obj.handle_error()

def write(obj):
    try:
        obj.handle_write_event()
    except _reraised_exceptions:
        raise
    except:
        obj.handle_error()

def _exception(obj):
    try:
        obj.handle_expt_event()
    except _reraised_exceptions:
        raise
    except:
        obj.handle_error()

def readwrite(obj, flags):
    try:
        if flags & select.POLLIN:
            obj.handle_read_event()
        if flags & select.POLLOUT:
            obj.handle_write_event()
        if flags & select.POLLPRI:
            obj.handle_expt_event()
        if flags & (select.POLLHUP | select.POLLERR | select.POLLNVAL):
            obj.handle_close()
    except socket.error, e:
        if e.args[0] not in _DISCONNECTED:
            obj.handle_error()
        else:
            obj.handle_close()
    except _reraised_exceptions:
        raise
    except:
        obj.handle_error()

def poll(timeout=0.0, map=None):
    if map is None:
        map = socket_map
    if map:
        r = []; w = []; e = []
        for fd, obj in map.items():
            is_r = obj.readable()
            is_w = obj.writable()
            if is_r:
                r.append(fd)
            # accepting sockets should not be writable
            if is_w and not obj.accepting:
                w.append(fd)
            if is_r or is_w:
                e.append(fd)
        if [] == r == w == e:
            time.sleep(timeout)
            return

        try:
            r, w, e = select.select(r, w, e, timeout)
        except select.error, err:
            if err.args[0] != EINTR:
                raise
            else:
                return

        for fd in r:
            obj = map.get(fd)
            if obj is None:
                continue
            read(obj)

        for fd in w:
            obj = map.get(fd)
            if obj is None:
                continue
            write(obj)

        for fd in e:
            obj = map.get(fd)
            if obj is None:
                continue
            _exception(obj)

def poll2(timeout=0.0, map=None):
    # Use the poll() support added to the select module in Python 2.0
    if map is None:
        map = socket_map
    if timeout is not None:
        # timeout is in milliseconds
        timeout = int(timeout*1000)
    pollster = select.poll()
    if map:
        for fd, obj in map.items():
            flags = 0
            if obj.readable():
                flags |= select.POLLIN | select.POLLPRI
            # accepting sockets should not be writable
            if obj.writable() and not obj.accepting:
                flags |= select.POLLOUT
            if flags:
                # Only check for exceptions if object was either readable
                # or writable.
                flags |= select.POLLERR | select.POLLHUP | select.POLLNVAL
                pollster.register(fd, flags)
        try:
            r = pollster.poll(timeout)
        except select.error, err:
            if err.args[0] != EINTR:
                raise
            r = []
        for fd, flags in r:
            obj = map.get(fd)
            if obj is None:
                continue
            readwrite(obj, flags)

poll3 = poll2                           # Alias for backward compatibility

def loop(timeout=30.0, use_poll=False, map=None, count=None):
    if map is None:
        map = socket_map

    if use_poll and hasattr(select, 'poll'):
        poll_fun = poll2
    else:
        poll_fun = poll

    if count is None:
        while map:
            poll_fun(timeout, map)

    else:
        while map and count > 0:
            poll_fun(timeout, map)
            count = count - 1

class dispatcher:

    debug = False
    connected = False
    accepting = False
    connecting = False
    closing = False
    addr = None
    ignore_log_types = frozenset(['warning'])

    def __init__(self, sock=None, map=None):
        if map is None:
            self._map = socket_map
        else:
            self._map = map

        self._fileno = None

        if sock:
            # Set to nonblocking just to make sure for cases where we
            # get a socket from a blocking source.
            sock.setblocking(0)
            self.set_socket(sock, map)
            self.connected = True
            # The constructor no longer requires that the socket
            # passed be connected.
            try:
                self.addr = sock.getpeername()
            except socket.error, err:
                if err.args[0] in (ENOTCONN, EINVAL):
                    # To handle the case where we got an unconnected
                    # socket.
                    self.connected = False
                else:
                    # The socket is broken in some unknown way, alert
                    # the user and remove it from the map (to prevent
                    # polling of broken sockets).
                    self.del_channel(map)
                    raise
        else:
            self.socket = None

    def __repr__(self):
        status = [self.__class__.__module__+"."+self.__class__.__name__]
        if self.accepting and self.addr:
            status.append('listening')
        elif self.connected:
            status.append('connected')
        if self.addr is not None:
            try:
                status.append('%s:%d' % self.addr)
            except TypeError:
                status.append(repr(self.addr))
        return '<%s at %#x>' % (' '.join(status), id(self))

    __str__ = __repr__

    def add_channel(self, map=None):
        #self.log_info('adding channel %s' % self)
        if map is None:
            map = self._map
        map[self._fileno] = self

    def del_channel(self, map=None):
        fd = self._fileno
        if map is None:
            map = self._map
        if fd in map:
            #self.log_info('closing channel %d:%s' % (fd, self))
            del map[fd]
        self._fileno = None

    def create_socket(self, family, type):
        self.family_and_type = family, type
        sock = socket.socket(family, type)
        sock.setblocking(0)
        self.set_socket(sock)

    def set_socket(self, sock, map=None):
        self.socket = sock
##        self.__dict__['socket'] = sock
        self._fileno = sock.fileno()
        self.add_channel(map)

    def set_reuse_addr(self):
        # try to re-use a server port if possible
        try:
            self.socket.setsockopt(
                socket.SOL_SOCKET, socket.SO_REUSEADDR,
                self.socket.getsockopt(socket.SOL_SOCKET,
                                       socket.SO_REUSEADDR) | 1
                )
        except socket.error:
            pass

    # ==================================================
    # predicates for select()
    # these are used as filters for the lists of sockets
    # to pass to select().
    # ==================================================

    def readable(self):
        return True

    def writable(self):
        return True

    # ==================================================
    # socket object methods.
    # ==================================================

    def listen(self, num):
        self.accepting = True
        if os.name == 'nt' and num > 5:
            num = 5
        return self.socket.listen(num)

    def bind(self, addr):
        self.addr = addr
        return self.socket.bind(addr)

    def connect(self, address):
        self.connected = False
        self.connecting = True
        err = self.socket.connect_ex(address)
        if err in (EINPROGRESS, EALREADY, EWOULDBLOCK) \
        or err == EINVAL and os.name in ('nt', 'ce'):
            self.addr = address
            return
        if err in (0, EISCONN):
            self.addr = address
            self.handle_connect_event()
        else:
            raise socket.error(err, errorcode[err])

    def accept(self):
        # XXX can return either an address pair or None
        try:
            conn, addr = self.socket.accept()
        except TypeError:
            return None
        except socket.error as why:
            if why.args[0] in (EWOULDBLOCK, ECONNABORTED, EAGAIN):
                return None
            else:
                raise
        else:
            return conn, addr

    def send(self, data):
        try:
            result = self.socket.send(data)
            return result
        except socket.error, why:
            if why.args[0] == EWOULDBLOCK:
                return 0
            elif why.args[0] in _DISCONNECTED:
                self.handle_close()
                return 0
            else:
                raise

    def recv(self, buffer_size):
        try:
            data = self.socket.recv(buffer_size)
            if not data:
                # a closed connection is indicated by signaling
                # a read condition, and having recv() return 0.
                self.handle_close()
                return ''
            else:
                return data
        except socket.error, why:
            # winsock sometimes raises ENOTCONN
            if why.args[0] in _DISCONNECTED:
                self.handle_close()
                return ''
            else:
                raise

    def close(self):
        self.connected = False
        self.accepting = False
        self.connecting = False
        self.del_channel()
        try:
            self.socket.close()
        except socket.error, why:
            if why.args[0] not in (ENOTCONN, EBADF):
                raise

    # cheap inheritance, used to pass all other attribute
    # references to the underlying socket object.
    def __getattr__(self, attr):
        try:
            retattr = getattr(self.socket, attr)
        except AttributeError:
            raise AttributeError("%s instance has no attribute '%s'"
                                 %(self.__class__.__name__, attr))
        else:
            msg = "%(me)s.%(attr)s is deprecated. Use %(me)s.socket.%(attr)s " \
                  "instead." % {'me': self.__class__.__name__, 'attr':attr}
            warnings.warn(msg, DeprecationWarning, stacklevel=2)
            return retattr

    # log and log_info may be overridden to provide more sophisticated
    # logging and warning methods. In general, log is for 'hit' logging
    # and 'log_info' is for informational, warning and error logging.

    def log(self, message):
        sys.stderr.write('log: %s\n' % str(message))

    def log_info(self, message, type='info'):
        if type not in self.ignore_log_types:
            print '%s: %s' % (type, message)

    def handle_read_event(self):
        if self.accepting:
            # accepting sockets are never connected, they "spawn" new
            # sockets that are connected
            self.handle_accept()
        elif not self.connected:
            if self.connecting:
                self.handle_connect_event()
            self.handle_read()
        else:
            self.handle_read()

    def handle_connect_event(self):
        err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
        if err != 0:
            raise socket.error(err, _strerror(err))
        self.handle_connect()
        self.connected = True
        self.connecting = False

    def handle_write_event(self):
        if self.accepting:
            # Accepting sockets shouldn't get a write event.
            # We will pretend it didn't happen.
            return

        if not self.connected:
            if self.connecting:
                self.handle_connect_event()
        self.handle_write()

    def handle_expt_event(self):
        # handle_expt_event() is called if there might be an error on the
        # socket, or if there is OOB data
        # check for the error condition first
        err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
        if err != 0:
            # we can get here when select.select() says that there is an
            # exceptional condition on the socket
            # since there is an error, we'll go ahead and close the socket
            # like we would in a subclassed handle_read() that received no
            # data
            self.handle_close()
        else:
            self.handle_expt()

    def handle_error(self):
        nil, t, v, tbinfo = compact_traceback()

        # sometimes a user repr method will crash.
        try:
            self_repr = repr(self)
        except:
            self_repr = '<__repr__(self) failed for object at %0x>' % id(self)

        self.log_info(
            'uncaptured python exception, closing channel %s (%s:%s %s)' % (
                self_repr,
                t,
                v,
                tbinfo
                ),
            'error'
            )
        self.handle_close()

    def handle_expt(self):
        self.log_info('unhandled incoming priority event', 'warning')

    def handle_read(self):
        self.log_info('unhandled read event', 'warning')

    def handle_write(self):
        self.log_info('unhandled write event', 'warning')

    def handle_connect(self):
        self.log_info('unhandled connect event', 'warning')

    def handle_accept(self):
        self.log_info('unhandled accept event', 'warning')

    def handle_close(self):
        self.log_info('unhandled close event', 'warning')
        self.close()

# ---------------------------------------------------------------------------
# adds simple buffered output capability, useful for simple clients.
# [for more sophisticated usage use asynchat.async_chat]
# ---------------------------------------------------------------------------

class dispatcher_with_send(dispatcher):

    def __init__(self, sock=None, map=None):
        dispatcher.__init__(self, sock, map)
        self.out_buffer = ''

    def initiate_send(self):
        num_sent = 0
        num_sent = dispatcher.send(self, self.out_buffer[:512])
        self.out_buffer = self.out_buffer[num_sent:]

    def handle_write(self):
        self.initiate_send()

    def writable(self):
        return (not self.connected) or len(self.out_buffer)

    def send(self, data):
        if self.debug:
            self.log_info('sending %s' % repr(data))
        self.out_buffer = self.out_buffer + data
        self.initiate_send()

# ---------------------------------------------------------------------------
# used for debugging.
# ---------------------------------------------------------------------------

def compact_traceback():
    t, v, tb = sys.exc_info()
    tbinfo = []
    if not tb: # Must have a traceback
        raise AssertionError("traceback does not exist")
    while tb:
        tbinfo.append((
            tb.tb_frame.f_code.co_filename,
            tb.tb_frame.f_code.co_name,
            str(tb.tb_lineno)
            ))
        tb = tb.tb_next

    # just to be safe
    del tb

    file, function, line = tbinfo[-1]
    info = ' '.join(['[%s|%s|%s]' % x for x in tbinfo])
    return (file, function, line), t, v, info

def close_all(map=None, ignore_all=False):
    if map is None:
        map = socket_map
    for x in map.values():
        try:
            x.close()
        except OSError, x:
            if x.args[0] == EBADF:
                pass
            elif not ignore_all:
                raise
        except _reraised_exceptions:
            raise
        except:
            if not ignore_all:
                raise
    map.clear()

# Asynchronous File I/O:
#
# After a little research (reading man pages on various unixen, and
# digging through the linux kernel), I've determined that select()
# isn't meant for doing asynchronous file i/o.
# Heartening, though - reading linux/mm/filemap.c shows that linux
# supports asynchronous read-ahead. So _MOST_ of the time, the data
# will be sitting in memory for us already when we go to read it.
#
# What other OS's (besides NT) support async file i/o?  [VMS?]
#
# Regardless, this is useful for pipes, and stdin/stdout...

if os.name == 'posix':
    import fcntl

    class file_wrapper:
        # Here we override just enough to make a file
        # look like a socket for the purposes of asyncore.
        # The passed fd is automatically os.dup()'d

        def __init__(self, fd):
            self.fd = os.dup(fd)

        def recv(self, *args):
            return os.read(self.fd, *args)

        def send(self, *args):
            return os.write(self.fd, *args)

        def getsockopt(self, level, optname, buflen=None):
            if (level == socket.SOL_SOCKET and
                optname == socket.SO_ERROR and
                not buflen):
                return 0
            raise NotImplementedError("Only asyncore specific behaviour "
                                      "implemented.")

        read = recv
        write = send

        def close(self):
            os.close(self.fd)

        def fileno(self):
            return self.fd

    class file_dispatcher(dispatcher):

        def __init__(self, fd, map=None):
            dispatcher.__init__(self, None, map)
            self.connected = True
            try:
                fd = fd.fileno()
            except AttributeError:
                pass
            self.set_file(fd)
            # set it to non-blocking mode
            flags = fcntl.fcntl(fd, fcntl.F_GETFL, 0)
            flags = flags | os.O_NONBLOCK
            fcntl.fcntl(fd, fcntl.F_SETFL, flags)

        def set_file(self, fd):
            self.socket = file_wrapper(fd)
            self._fileno = self.socket.fileno()
            self.add_channel()
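A dispatcher subclass sketch that exercises the handle_* hooks defined above with a non-blocking HTTP/1.0 GET; the target host is hypothetical:

    import asyncore
    import socket

    class HTTPClient(asyncore.dispatcher):
        def __init__(self, host, path):
            asyncore.dispatcher.__init__(self)
            self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
            self.connect((host, 80))
            self.buffer = 'GET %s HTTP/1.0\r\nHost: %s\r\n\r\n' % (path, host)

        def handle_connect(self):
            pass

        def handle_close(self):
            self.close()

        def handle_read(self):
            print self.recv(8192)

        def writable(self):
            # stay in the writable set only while request bytes remain
            return len(self.buffer) > 0

        def handle_write(self):
            sent = self.send(self.buffer)
            self.buffer = self.buffer[sent:]

    # HTTPClient('www.python.org', '/')      # hypothetical target
    # asyncore.loop()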
Binary file not shown.

65 PythonHome/Lib/atexit.py Normal file
@@ -0,0 +1,65 @@
"""
atexit.py - allow programmer to define multiple exit functions to be executed
upon normal program termination.

One public function, register, is defined.
"""

__all__ = ["register"]

import sys

_exithandlers = []
def _run_exitfuncs():
    """run any registered exit functions

    _exithandlers is traversed in reverse order so functions are executed
    last in, first out.
    """

    exc_info = None
    while _exithandlers:
        func, targs, kargs = _exithandlers.pop()
        try:
            func(*targs, **kargs)
        except SystemExit:
            exc_info = sys.exc_info()
        except:
            import traceback
            print >> sys.stderr, "Error in atexit._run_exitfuncs:"
            traceback.print_exc()
            exc_info = sys.exc_info()

    if exc_info is not None:
        raise exc_info[0], exc_info[1], exc_info[2]


def register(func, *targs, **kargs):
    """register a function to be executed upon normal program termination

    func - function to be called at exit
    targs - optional arguments to pass to func
    kargs - optional keyword arguments to pass to func

    func is returned to facilitate usage as a decorator.
    """
    _exithandlers.append((func, targs, kargs))
    return func

if hasattr(sys, "exitfunc"):
    # Assume it's another registered exit function - append it to our list
    register(sys.exitfunc)
sys.exitfunc = _run_exitfuncs

if __name__ == "__main__":
    def x1():
        print "running x1"
    def x2(n):
        print "running x2(%r)" % (n,)
    def x3(n, kwd=None):
        print "running x3(%r, kwd=%r)" % (n, kwd)

    register(x1)
    register(x2, 12)
    register(x3, 5, "bar")
    register(x3, "no kwd args")
Binary file not shown.

260 PythonHome/Lib/audiodev.py Normal file
@@ -0,0 +1,260 @@
"""Classes for manipulating audio devices (currently only for Sun and SGI)"""
from warnings import warnpy3k
warnpy3k("the audiodev module has been removed in Python 3.0", stacklevel=2)
del warnpy3k

__all__ = ["error","AudioDev"]

class error(Exception):
    pass

class Play_Audio_sgi:
    # Private instance variables
##      if 0: access frameratelist, nchannelslist, sampwidthlist, oldparams, \
##              params, config, inited_outrate, inited_width, \
##              inited_nchannels, port, converter, classinited: private

    classinited = 0
    frameratelist = nchannelslist = sampwidthlist = None

    def initclass(self):
        import AL
        self.frameratelist = [
            (48000, AL.RATE_48000),
            (44100, AL.RATE_44100),
            (32000, AL.RATE_32000),
            (22050, AL.RATE_22050),
            (16000, AL.RATE_16000),
            (11025, AL.RATE_11025),
            ( 8000, AL.RATE_8000),
            ]
        self.nchannelslist = [
            (1, AL.MONO),
            (2, AL.STEREO),
            (4, AL.QUADRO),
            ]
        self.sampwidthlist = [
            (1, AL.SAMPLE_8),
            (2, AL.SAMPLE_16),
            (3, AL.SAMPLE_24),
            ]
        self.classinited = 1

    def __init__(self):
        import al, AL
        if not self.classinited:
            self.initclass()
        self.oldparams = []
        self.params = [AL.OUTPUT_RATE, 0]
        self.config = al.newconfig()
        self.inited_outrate = 0
        self.inited_width = 0
        self.inited_nchannels = 0
        self.converter = None
        self.port = None
        return

    def __del__(self):
        if self.port:
            self.stop()
        if self.oldparams:
            import al, AL
            al.setparams(AL.DEFAULT_DEVICE, self.oldparams)
            self.oldparams = []

    def wait(self):
        if not self.port:
            return
        import time
        while self.port.getfilled() > 0:
            time.sleep(0.1)
        self.stop()

    def stop(self):
        if self.port:
            self.port.closeport()
            self.port = None
        if self.oldparams:
            import al, AL
            al.setparams(AL.DEFAULT_DEVICE, self.oldparams)
            self.oldparams = []

    def setoutrate(self, rate):
        for (raw, cooked) in self.frameratelist:
            if rate == raw:
                self.params[1] = cooked
                self.inited_outrate = 1
                break
        else:
            raise error, 'bad output rate'

    def setsampwidth(self, width):
        for (raw, cooked) in self.sampwidthlist:
            if width == raw:
                self.config.setwidth(cooked)
                self.inited_width = 1
                break
        else:
            if width == 0:
                import AL
                self.inited_width = 0
                self.config.setwidth(AL.SAMPLE_16)
                self.converter = self.ulaw2lin
            else:
                raise error, 'bad sample width'

    def setnchannels(self, nchannels):
        for (raw, cooked) in self.nchannelslist:
            if nchannels == raw:
                self.config.setchannels(cooked)
                self.inited_nchannels = 1
                break
        else:
            raise error, 'bad # of channels'

    def writeframes(self, data):
        if not (self.inited_outrate and self.inited_nchannels):
            raise error, 'params not specified'
        if not self.port:
            import al, AL
            self.port = al.openport('Python', 'w', self.config)
            self.oldparams = self.params[:]
            al.getparams(AL.DEFAULT_DEVICE, self.oldparams)
            al.setparams(AL.DEFAULT_DEVICE, self.params)
        if self.converter:
            data = self.converter(data)
        self.port.writesamps(data)

    def getfilled(self):
        if self.port:
            return self.port.getfilled()
        else:
            return 0

    def getfillable(self):
        if self.port:
            return self.port.getfillable()
        else:
            return self.config.getqueuesize()

    # private methods
##      if 0: access *: private

    def ulaw2lin(self, data):
        import audioop
        return audioop.ulaw2lin(data, 2)

class Play_Audio_sun:
##      if 0: access outrate, sampwidth, nchannels, inited_outrate, inited_width, \
##              inited_nchannels, converter: private

    def __init__(self):
        self.outrate = 0
        self.sampwidth = 0
        self.nchannels = 0
        self.inited_outrate = 0
        self.inited_width = 0
        self.inited_nchannels = 0
        self.converter = None
        self.port = None
        return

    def __del__(self):
        self.stop()

    def setoutrate(self, rate):
        self.outrate = rate
        self.inited_outrate = 1

    def setsampwidth(self, width):
        self.sampwidth = width
        self.inited_width = 1

    def setnchannels(self, nchannels):
        self.nchannels = nchannels
        self.inited_nchannels = 1

    def writeframes(self, data):
        if not (self.inited_outrate and self.inited_width and self.inited_nchannels):
            raise error, 'params not specified'
        if not self.port:
            import sunaudiodev, SUNAUDIODEV
            self.port = sunaudiodev.open('w')
            info = self.port.getinfo()
            info.o_sample_rate = self.outrate
            info.o_channels = self.nchannels
            if self.sampwidth == 0:
                info.o_precision = 8
                self.o_encoding = SUNAUDIODEV.ENCODING_ULAW
                # XXX Hack, hack -- leave defaults
            else:
                info.o_precision = 8 * self.sampwidth
                info.o_encoding = SUNAUDIODEV.ENCODING_LINEAR
            self.port.setinfo(info)
        if self.converter:
            data = self.converter(data)
        self.port.write(data)

    def wait(self):
        if not self.port:
            return
        self.port.drain()
        self.stop()

    def stop(self):
        if self.port:
            self.port.flush()
            self.port.close()
            self.port = None

    def getfilled(self):
        if self.port:
            return self.port.obufcount()
        else:
            return 0

##    # Nobody remembers what this method does, and it's broken. :-(
##    def getfillable(self):
##        return BUFFERSIZE - self.getfilled()

def AudioDev():
    # Dynamically try to import and use a platform specific module.
    try:
        import al
    except ImportError:
        try:
            import sunaudiodev
            return Play_Audio_sun()
        except ImportError:
            try:
                import Audio_mac
            except ImportError:
                raise error, 'no audio device'
            else:
                return Audio_mac.Play_Audio_mac()
    else:
        return Play_Audio_sgi()

def test(fn = None):
    import sys
    if sys.argv[1:]:
        fn = sys.argv[1]
    else:
        fn = 'f:just samples:just.aif'
    import aifc
    af = aifc.open(fn, 'r')
    print fn, af.getparams()
    p = AudioDev()
    p.setoutrate(af.getframerate())
    p.setsampwidth(af.getsampwidth())
    p.setnchannels(af.getnchannels())
    BUFSIZ = af.getframerate()/af.getsampwidth()/af.getnchannels()
    while 1:
        data = af.readframes(BUFSIZ)
        if not data: break
        print len(data)
        p.writeframes(data)
    p.wait()

if __name__ == '__main__':
    test()
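The test() above doubles as a demo; in outline, every player object follows the same call sequence. A minimal sketch, assuming a legacy SGI or Sun host where AudioDev() can actually find a backend (modern platforms will raise error):

    import audiodev

    p = audiodev.AudioDev()            # dispatches to Play_Audio_sgi or Play_Audio_sun
    p.setoutrate(8000)                 # frame rate; on SGI it must appear in frameratelist
    p.setsampwidth(2)                  # 16-bit linear samples
    p.setnchannels(1)                  # mono
    p.writeframes('\x00\x00' * 8000)   # one second of silence as a raw byte string
    p.wait()                           # block until the device drains, then stop()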
Binary file not shown.
360
PythonHome/Lib/base64.py
Normal file
@ -0,0 +1,360 @@
#! /usr/bin/env python

"""RFC 3548: Base16, Base32, Base64 Data Encodings"""

# Modified 04-Oct-1995 by Jack Jansen to use binascii module
# Modified 30-Dec-2003 by Barry Warsaw to add full RFC 3548 support

import re
import struct
import binascii


__all__ = [
    # Legacy interface exports traditional RFC 1521 Base64 encodings
    'encode', 'decode', 'encodestring', 'decodestring',
    # Generalized interface for other encodings
    'b64encode', 'b64decode', 'b32encode', 'b32decode',
    'b16encode', 'b16decode',
    # Standard Base64 encoding
    'standard_b64encode', 'standard_b64decode',
    # Some common Base64 alternatives.  As referenced by RFC 3458, see thread
    # starting at:
    #
    # http://zgp.org/pipermail/p2p-hackers/2001-September/000316.html
    'urlsafe_b64encode', 'urlsafe_b64decode',
    ]

_translation = [chr(_x) for _x in range(256)]
EMPTYSTRING = ''


def _translate(s, altchars):
    translation = _translation[:]
    for k, v in altchars.items():
        translation[ord(k)] = v
    return s.translate(''.join(translation))



# Base64 encoding/decoding uses binascii

def b64encode(s, altchars=None):
    """Encode a string using Base64.

    s is the string to encode.  Optional altchars must be a string of at least
    length 2 (additional characters are ignored) which specifies an
    alternative alphabet for the '+' and '/' characters.  This allows an
    application to e.g. generate url or filesystem safe Base64 strings.

    The encoded string is returned.
    """
    # Strip off the trailing newline
    encoded = binascii.b2a_base64(s)[:-1]
    if altchars is not None:
        return _translate(encoded, {'+': altchars[0], '/': altchars[1]})
    return encoded


def b64decode(s, altchars=None):
    """Decode a Base64 encoded string.

    s is the string to decode.  Optional altchars must be a string of at least
    length 2 (additional characters are ignored) which specifies the
    alternative alphabet used instead of the '+' and '/' characters.

    The decoded string is returned.  A TypeError is raised if s were
    incorrectly padded or if there are non-alphabet characters present in the
    string.
    """
    if altchars is not None:
        s = _translate(s, {altchars[0]: '+', altchars[1]: '/'})
    try:
        return binascii.a2b_base64(s)
    except binascii.Error, msg:
        # Transform this exception for consistency
        raise TypeError(msg)


def standard_b64encode(s):
    """Encode a string using the standard Base64 alphabet.

    s is the string to encode.  The encoded string is returned.
    """
    return b64encode(s)

def standard_b64decode(s):
    """Decode a string encoded with the standard Base64 alphabet.

    s is the string to decode.  The decoded string is returned.  A TypeError
    is raised if the string is incorrectly padded or if there are non-alphabet
    characters present in the string.
    """
    return b64decode(s)

def urlsafe_b64encode(s):
    """Encode a string using a url-safe Base64 alphabet.

    s is the string to encode.  The encoded string is returned.  The alphabet
    uses '-' instead of '+' and '_' instead of '/'.
    """
    return b64encode(s, '-_')

def urlsafe_b64decode(s):
    """Decode a string encoded with the standard Base64 alphabet.

    s is the string to decode.  The decoded string is returned.  A TypeError
    is raised if the string is incorrectly padded or if there are non-alphabet
    characters present in the string.

    The alphabet uses '-' instead of '+' and '_' instead of '/'.
    """
    return b64decode(s, '-_')



# Base32 encoding/decoding must be done in Python
_b32alphabet = {
    0: 'A',  9: 'J', 18: 'S', 27: '3',
    1: 'B', 10: 'K', 19: 'T', 28: '4',
    2: 'C', 11: 'L', 20: 'U', 29: '5',
    3: 'D', 12: 'M', 21: 'V', 30: '6',
    4: 'E', 13: 'N', 22: 'W', 31: '7',
    5: 'F', 14: 'O', 23: 'X',
    6: 'G', 15: 'P', 24: 'Y',
    7: 'H', 16: 'Q', 25: 'Z',
    8: 'I', 17: 'R', 26: '2',
    }

_b32tab = _b32alphabet.items()
_b32tab.sort()
_b32tab = [v for k, v in _b32tab]
_b32rev = dict([(v, long(k)) for k, v in _b32alphabet.items()])


def b32encode(s):
    """Encode a string using Base32.

    s is the string to encode.  The encoded string is returned.
    """
    parts = []
    quanta, leftover = divmod(len(s), 5)
    # Pad the last quantum with zero bits if necessary
    if leftover:
        s += ('\0' * (5 - leftover))
        quanta += 1
    for i in range(quanta):
        # c1 and c2 are 16 bits wide, c3 is 8 bits wide.  The intent of this
        # code is to process the 40 bits in units of 5 bits.  So we take the 1
        # leftover bit of c1 and tack it onto c2.  Then we take the 2 leftover
        # bits of c2 and tack them onto c3.  The shifts and masks are intended
        # to give us values of exactly 5 bits in width.
        c1, c2, c3 = struct.unpack('!HHB', s[i*5:(i+1)*5])
        c2 += (c1 & 1) << 16 # 17 bits wide
        c3 += (c2 & 3) << 8  # 10 bits wide
        parts.extend([_b32tab[c1 >> 11],         # bits 1 - 5
                      _b32tab[(c1 >> 6) & 0x1f], # bits 6 - 10
                      _b32tab[(c1 >> 1) & 0x1f], # bits 11 - 15
                      _b32tab[c2 >> 12],         # bits 16 - 20 (1 - 5)
                      _b32tab[(c2 >> 7) & 0x1f], # bits 21 - 25 (6 - 10)
                      _b32tab[(c2 >> 2) & 0x1f], # bits 26 - 30 (11 - 15)
                      _b32tab[c3 >> 5],          # bits 31 - 35 (1 - 5)
                      _b32tab[c3 & 0x1f],        # bits 36 - 40 (1 - 5)
                      ])
    encoded = EMPTYSTRING.join(parts)
    # Adjust for any leftover partial quanta
    if leftover == 1:
        return encoded[:-6] + '======'
    elif leftover == 2:
        return encoded[:-4] + '===='
    elif leftover == 3:
        return encoded[:-3] + '==='
    elif leftover == 4:
        return encoded[:-1] + '='
    return encoded


def b32decode(s, casefold=False, map01=None):
    """Decode a Base32 encoded string.

    s is the string to decode.  Optional casefold is a flag specifying whether
    a lowercase alphabet is acceptable as input.  For security purposes, the
    default is False.

    RFC 3548 allows for optional mapping of the digit 0 (zero) to the letter O
    (oh), and for optional mapping of the digit 1 (one) to either the letter I
    (eye) or letter L (el).  The optional argument map01 when not None,
    specifies which letter the digit 1 should be mapped to (when map01 is not
    None, the digit 0 is always mapped to the letter O).  For security
    purposes the default is None, so that 0 and 1 are not allowed in the
    input.

    The decoded string is returned.  A TypeError is raised if s were
    incorrectly padded or if there are non-alphabet characters present in the
    string.
    """
    quanta, leftover = divmod(len(s), 8)
    if leftover:
        raise TypeError('Incorrect padding')
    # Handle section 2.4 zero and one mapping.  The flag map01 will be either
    # False, or the character to map the digit 1 (one) to.  It should be
    # either L (el) or I (eye).
    if map01:
        s = _translate(s, {'0': 'O', '1': map01})
    if casefold:
        s = s.upper()
    # Strip off pad characters from the right.  We need to count the pad
    # characters because this will tell us how many null bytes to remove from
    # the end of the decoded string.
    padchars = 0
    mo = re.search('(?P<pad>[=]*)$', s)
    if mo:
        padchars = len(mo.group('pad'))
        if padchars > 0:
            s = s[:-padchars]
    # Now decode the full quanta
    parts = []
    acc = 0
    shift = 35
    for c in s:
        val = _b32rev.get(c)
        if val is None:
            raise TypeError('Non-base32 digit found')
        acc += _b32rev[c] << shift
        shift -= 5
        if shift < 0:
            parts.append(binascii.unhexlify('%010x' % acc))
            acc = 0
            shift = 35
    # Process the last, partial quanta
    last = binascii.unhexlify('%010x' % acc)
    if padchars == 0:
        last = ''                       # No characters
    elif padchars == 1:
        last = last[:-1]
    elif padchars == 3:
        last = last[:-2]
    elif padchars == 4:
        last = last[:-3]
    elif padchars == 6:
        last = last[:-4]
    else:
        raise TypeError('Incorrect padding')
    parts.append(last)
    return EMPTYSTRING.join(parts)



# RFC 3548, Base 16 Alphabet specifies uppercase, but hexlify() returns
# lowercase.  The RFC also recommends against accepting input case
# insensitively.
def b16encode(s):
    """Encode a string using Base16.

    s is the string to encode.  The encoded string is returned.
    """
    return binascii.hexlify(s).upper()


def b16decode(s, casefold=False):
    """Decode a Base16 encoded string.

    s is the string to decode.  Optional casefold is a flag specifying whether
    a lowercase alphabet is acceptable as input.  For security purposes, the
    default is False.

    The decoded string is returned.  A TypeError is raised if s were
    incorrectly padded or if there are non-alphabet characters present in the
    string.
    """
    if casefold:
        s = s.upper()
    if re.search('[^0-9A-F]', s):
        raise TypeError('Non-base16 digit found')
    return binascii.unhexlify(s)



# Legacy interface.  This code could be cleaned up since I don't believe
# binascii has any line length limitations.  It just doesn't seem worth it
# though.

MAXLINESIZE = 76 # Excluding the CRLF
MAXBINSIZE = (MAXLINESIZE//4)*3

def encode(input, output):
    """Encode a file."""
    while True:
        s = input.read(MAXBINSIZE)
        if not s:
            break
        while len(s) < MAXBINSIZE:
            ns = input.read(MAXBINSIZE-len(s))
            if not ns:
                break
            s += ns
        line = binascii.b2a_base64(s)
        output.write(line)


def decode(input, output):
    """Decode a file."""
    while True:
        line = input.readline()
        if not line:
            break
        s = binascii.a2b_base64(line)
        output.write(s)


def encodestring(s):
    """Encode a string into multiple lines of base-64 data."""
    pieces = []
    for i in range(0, len(s), MAXBINSIZE):
        chunk = s[i : i + MAXBINSIZE]
        pieces.append(binascii.b2a_base64(chunk))
    return "".join(pieces)


def decodestring(s):
    """Decode a string."""
    return binascii.a2b_base64(s)



# Useable as a script...
def test():
    """Small test program"""
    import sys, getopt
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'deut')
    except getopt.error, msg:
        sys.stdout = sys.stderr
        print msg
        print """usage: %s [-d|-e|-u|-t] [file|-]
        -d, -u: decode
        -e: encode (default)
        -t: encode and decode string 'Aladdin:open sesame'"""%sys.argv[0]
        sys.exit(2)
    func = encode
    for o, a in opts:
        if o == '-e': func = encode
        if o == '-d': func = decode
        if o == '-u': func = decode
        if o == '-t': test1(); return
    if args and args[0] != '-':
        with open(args[0], 'rb') as f:
            func(f, sys.stdout)
    else:
        func(sys.stdin, sys.stdout)


def test1():
    s0 = "Aladdin:open sesame"
    s1 = encodestring(s0)
    s2 = decodestring(s1)
    print s0, repr(s1), s2


if __name__ == '__main__':
    test()
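A short round-trip sketch of the encoders defined above (the input strings are arbitrary examples):

    import base64

    enc = base64.b64encode('Aladdin:open sesame')   # 'QWxhZGRpbjpvcGVuIHNlc2FtZQ=='
    assert base64.b64decode(enc) == 'Aladdin:open sesame'

    url = base64.urlsafe_b64encode('\xfb\xff')      # '-_8=': '-' and '_' replace '+' and '/'
    assert base64.urlsafe_b64decode(url) == '\xfb\xff'

    b32 = base64.b32encode('hi')                    # 'NBUQ====': padded to an 8-char quantum
    assert base64.b32decode(b32) == 'hi'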
Binary file not shown.
645
PythonHome/Lib/bdb.py
Normal file
@ -0,0 +1,645 @@
"""Debugger basics"""

import fnmatch
import sys
import os
import types

__all__ = ["BdbQuit","Bdb","Breakpoint"]

class BdbQuit(Exception):
    """Exception to give up completely"""


class Bdb:

    """Generic Python debugger base class.

    This class takes care of details of the trace facility;
    a derived class should implement user interaction.
    The standard debugger class (pdb.Pdb) is an example.
    """

    def __init__(self, skip=None):
        self.skip = set(skip) if skip else None
        self.breaks = {}
        self.fncache = {}
        self.frame_returning = None

    def canonic(self, filename):
        if filename == "<" + filename[1:-1] + ">":
            return filename
        canonic = self.fncache.get(filename)
        if not canonic:
            canonic = os.path.abspath(filename)
            canonic = os.path.normcase(canonic)
            self.fncache[filename] = canonic
        return canonic

    def reset(self):
        import linecache
        linecache.checkcache()
        self.botframe = None
        self._set_stopinfo(None, None)

    def trace_dispatch(self, frame, event, arg):
        if self.quitting:
            return # None
        if event == 'line':
            return self.dispatch_line(frame)
        if event == 'call':
            return self.dispatch_call(frame, arg)
        if event == 'return':
            return self.dispatch_return(frame, arg)
        if event == 'exception':
            return self.dispatch_exception(frame, arg)
        if event == 'c_call':
            return self.trace_dispatch
        if event == 'c_exception':
            return self.trace_dispatch
        if event == 'c_return':
            return self.trace_dispatch
        print 'bdb.Bdb.dispatch: unknown debugging event:', repr(event)
        return self.trace_dispatch

    def dispatch_line(self, frame):
        if self.stop_here(frame) or self.break_here(frame):
            self.user_line(frame)
            if self.quitting: raise BdbQuit
        return self.trace_dispatch

    def dispatch_call(self, frame, arg):
        # XXX 'arg' is no longer used
        if self.botframe is None:
            # First call of dispatch since reset()
            self.botframe = frame.f_back # (CT) Note that this may also be None!
            return self.trace_dispatch
        if not (self.stop_here(frame) or self.break_anywhere(frame)):
            # No need to trace this function
            return # None
        self.user_call(frame, arg)
        if self.quitting: raise BdbQuit
        return self.trace_dispatch

    def dispatch_return(self, frame, arg):
        if self.stop_here(frame) or frame == self.returnframe:
            try:
                self.frame_returning = frame
                self.user_return(frame, arg)
            finally:
                self.frame_returning = None
            if self.quitting: raise BdbQuit
        return self.trace_dispatch

    def dispatch_exception(self, frame, arg):
        if self.stop_here(frame):
            self.user_exception(frame, arg)
            if self.quitting: raise BdbQuit
        return self.trace_dispatch

    # Normally derived classes don't override the following
    # methods, but they may if they want to redefine the
    # definition of stopping and breakpoints.

    def is_skipped_module(self, module_name):
        for pattern in self.skip:
            if fnmatch.fnmatch(module_name, pattern):
                return True
        return False

    def stop_here(self, frame):
        # (CT) stopframe may now also be None, see dispatch_call.
        # (CT) the former test for None is therefore removed from here.
        if self.skip and \
               self.is_skipped_module(frame.f_globals.get('__name__')):
            return False
        if frame is self.stopframe:
            if self.stoplineno == -1:
                return False
            return frame.f_lineno >= self.stoplineno
        while frame is not None and frame is not self.stopframe:
            if frame is self.botframe:
                return True
            frame = frame.f_back
        return False

    def break_here(self, frame):
        filename = self.canonic(frame.f_code.co_filename)
        if not filename in self.breaks:
            return False
        lineno = frame.f_lineno
        if not lineno in self.breaks[filename]:
            # The line itself has no breakpoint, but maybe the line is the
            # first line of a function with breakpoint set by function name.
            lineno = frame.f_code.co_firstlineno
            if not lineno in self.breaks[filename]:
                return False

        # flag says ok to delete temp. bp
        (bp, flag) = effective(filename, lineno, frame)
        if bp:
            self.currentbp = bp.number
            if (flag and bp.temporary):
                self.do_clear(str(bp.number))
            return True
        else:
            return False

    def do_clear(self, arg):
        raise NotImplementedError, "subclass of bdb must implement do_clear()"

    def break_anywhere(self, frame):
        return self.canonic(frame.f_code.co_filename) in self.breaks

    # Derived classes should override the user_* methods
    # to gain control.

    def user_call(self, frame, argument_list):
        """This method is called when there is the remote possibility
        that we ever need to stop in this function."""
        pass

    def user_line(self, frame):
        """This method is called when we stop or break at this line."""
        pass

    def user_return(self, frame, return_value):
        """This method is called when a return trap is set here."""
        pass

    def user_exception(self, frame, exc_info):
        exc_type, exc_value, exc_traceback = exc_info
        """This method is called if an exception occurs,
        but only if we are to stop at or just below this level."""
        pass

    def _set_stopinfo(self, stopframe, returnframe, stoplineno=0):
        self.stopframe = stopframe
        self.returnframe = returnframe
        self.quitting = 0
        # stoplineno >= 0 means: stop at line >= the stoplineno
        # stoplineno -1 means: don't stop at all
        self.stoplineno = stoplineno

    # Derived classes and clients can call the following methods
    # to affect the stepping state.

    def set_until(self, frame): #the name "until" is borrowed from gdb
        """Stop when the line with the line no greater than the current one is
        reached or when returning from current frame"""
        self._set_stopinfo(frame, frame, frame.f_lineno+1)

    def set_step(self):
        """Stop after one line of code."""
        # Issue #13183: pdb skips frames after hitting a breakpoint and running
        # step commands.
        # Restore the trace function in the caller (that may not have been set
        # for performance reasons) when returning from the current frame.
        if self.frame_returning:
            caller_frame = self.frame_returning.f_back
            if caller_frame and not caller_frame.f_trace:
                caller_frame.f_trace = self.trace_dispatch
        self._set_stopinfo(None, None)

    def set_next(self, frame):
        """Stop on the next line in or below the given frame."""
        self._set_stopinfo(frame, None)

    def set_return(self, frame):
        """Stop when returning from the given frame."""
        self._set_stopinfo(frame.f_back, frame)

    def set_trace(self, frame=None):
        """Start debugging from `frame`.

        If frame is not specified, debugging starts from caller's frame.
        """
        if frame is None:
            frame = sys._getframe().f_back
        self.reset()
        while frame:
            frame.f_trace = self.trace_dispatch
            self.botframe = frame
            frame = frame.f_back
        self.set_step()
        sys.settrace(self.trace_dispatch)

    def set_continue(self):
        # Don't stop except at breakpoints or when finished
        self._set_stopinfo(self.botframe, None, -1)
        if not self.breaks:
            # no breakpoints; run without debugger overhead
            sys.settrace(None)
            frame = sys._getframe().f_back
            while frame and frame is not self.botframe:
                del frame.f_trace
                frame = frame.f_back

    def set_quit(self):
        self.stopframe = self.botframe
        self.returnframe = None
        self.quitting = 1
        sys.settrace(None)

    # Derived classes and clients can call the following methods
    # to manipulate breakpoints.  These methods return an
    # error message is something went wrong, None if all is well.
    # Set_break prints out the breakpoint line and file:lineno.
    # Call self.get_*break*() to see the breakpoints or better
    # for bp in Breakpoint.bpbynumber: if bp: bp.bpprint().

    def set_break(self, filename, lineno, temporary=0, cond = None,
                  funcname=None):
        filename = self.canonic(filename)
        import linecache # Import as late as possible
        line = linecache.getline(filename, lineno)
        if not line:
            return 'Line %s:%d does not exist' % (filename,
                                                  lineno)
        if not filename in self.breaks:
            self.breaks[filename] = []
        list = self.breaks[filename]
        if not lineno in list:
            list.append(lineno)
        bp = Breakpoint(filename, lineno, temporary, cond, funcname)

    def _prune_breaks(self, filename, lineno):
        if (filename, lineno) not in Breakpoint.bplist:
            self.breaks[filename].remove(lineno)
        if not self.breaks[filename]:
            del self.breaks[filename]

    def clear_break(self, filename, lineno):
        filename = self.canonic(filename)
        if not filename in self.breaks:
            return 'There are no breakpoints in %s' % filename
        if lineno not in self.breaks[filename]:
            return 'There is no breakpoint at %s:%d' % (filename,
                                                        lineno)
        # If there's only one bp in the list for that file,line
        # pair, then remove the breaks entry
        for bp in Breakpoint.bplist[filename, lineno][:]:
            bp.deleteMe()
        self._prune_breaks(filename, lineno)

    def clear_bpbynumber(self, arg):
        try:
            number = int(arg)
        except:
            return 'Non-numeric breakpoint number (%s)' % arg
        try:
            bp = Breakpoint.bpbynumber[number]
        except IndexError:
            return 'Breakpoint number (%d) out of range' % number
        if not bp:
            return 'Breakpoint (%d) already deleted' % number
        bp.deleteMe()
        self._prune_breaks(bp.file, bp.line)

    def clear_all_file_breaks(self, filename):
        filename = self.canonic(filename)
        if not filename in self.breaks:
            return 'There are no breakpoints in %s' % filename
        for line in self.breaks[filename]:
            blist = Breakpoint.bplist[filename, line]
            for bp in blist:
                bp.deleteMe()
        del self.breaks[filename]

    def clear_all_breaks(self):
        if not self.breaks:
            return 'There are no breakpoints'
        for bp in Breakpoint.bpbynumber:
            if bp:
                bp.deleteMe()
        self.breaks = {}

    def get_break(self, filename, lineno):
        filename = self.canonic(filename)
        return filename in self.breaks and \
            lineno in self.breaks[filename]

    def get_breaks(self, filename, lineno):
        filename = self.canonic(filename)
        return filename in self.breaks and \
            lineno in self.breaks[filename] and \
            Breakpoint.bplist[filename, lineno] or []

    def get_file_breaks(self, filename):
        filename = self.canonic(filename)
        if filename in self.breaks:
            return self.breaks[filename]
        else:
            return []

    def get_all_breaks(self):
        return self.breaks

    # Derived classes and clients can call the following method
    # to get a data structure representing a stack trace.

    def get_stack(self, f, t):
        stack = []
        if t and t.tb_frame is f:
            t = t.tb_next
        while f is not None:
            stack.append((f, f.f_lineno))
            if f is self.botframe:
                break
            f = f.f_back
        stack.reverse()
        i = max(0, len(stack) - 1)
        while t is not None:
            stack.append((t.tb_frame, t.tb_lineno))
            t = t.tb_next
        if f is None:
            i = max(0, len(stack) - 1)
        return stack, i

    #

    def format_stack_entry(self, frame_lineno, lprefix=': '):
        import linecache, repr
        frame, lineno = frame_lineno
        filename = self.canonic(frame.f_code.co_filename)
        s = '%s(%r)' % (filename, lineno)
        if frame.f_code.co_name:
            s = s + frame.f_code.co_name
        else:
            s = s + "<lambda>"
        if '__args__' in frame.f_locals:
            args = frame.f_locals['__args__']
        else:
            args = None
        if args:
            s = s + repr.repr(args)
        else:
            s = s + '()'
        if '__return__' in frame.f_locals:
            rv = frame.f_locals['__return__']
            s = s + '->'
            s = s + repr.repr(rv)
        line = linecache.getline(filename, lineno, frame.f_globals)
        if line: s = s + lprefix + line.strip()
        return s

    # The following two methods can be called by clients to use
    # a debugger to debug a statement, given as a string.

    def run(self, cmd, globals=None, locals=None):
        if globals is None:
            import __main__
            globals = __main__.__dict__
        if locals is None:
            locals = globals
        self.reset()
        sys.settrace(self.trace_dispatch)
        if not isinstance(cmd, types.CodeType):
            cmd = cmd+'\n'
        try:
            exec cmd in globals, locals
        except BdbQuit:
            pass
        finally:
            self.quitting = 1
            sys.settrace(None)

    def runeval(self, expr, globals=None, locals=None):
        if globals is None:
            import __main__
            globals = __main__.__dict__
        if locals is None:
            locals = globals
        self.reset()
        sys.settrace(self.trace_dispatch)
        if not isinstance(expr, types.CodeType):
            expr = expr+'\n'
        try:
            return eval(expr, globals, locals)
        except BdbQuit:
            pass
        finally:
            self.quitting = 1
            sys.settrace(None)

    def runctx(self, cmd, globals, locals):
        # B/W compatibility
        self.run(cmd, globals, locals)

    # This method is more useful to debug a single function call.

    def runcall(self, func, *args, **kwds):
        self.reset()
        sys.settrace(self.trace_dispatch)
        res = None
        try:
            res = func(*args, **kwds)
        except BdbQuit:
            pass
        finally:
            self.quitting = 1
            sys.settrace(None)
        return res


def set_trace():
    Bdb().set_trace()


class Breakpoint:

    """Breakpoint class

    Implements temporary breakpoints, ignore counts, disabling and
    (re)-enabling, and conditionals.

    Breakpoints are indexed by number through bpbynumber and by
    the file,line tuple using bplist.  The former points to a
    single instance of class Breakpoint.  The latter points to a
    list of such instances since there may be more than one
    breakpoint per line.

    """

    # XXX Keeping state in the class is a mistake -- this means
    # you cannot have more than one active Bdb instance.

    next = 1        # Next bp to be assigned
    bplist = {}     # indexed by (file, lineno) tuple
    bpbynumber = [None] # Each entry is None or an instance of Bpt
                        # index 0 is unused, except for marking an
                        # effective break .... see effective()

    def __init__(self, file, line, temporary=0, cond=None, funcname=None):
        self.funcname = funcname
        # Needed if funcname is not None.
        self.func_first_executable_line = None
        self.file = file    # This better be in canonical form!
        self.line = line
        self.temporary = temporary
        self.cond = cond
        self.enabled = 1
        self.ignore = 0
        self.hits = 0
        self.number = Breakpoint.next
        Breakpoint.next = Breakpoint.next + 1
        # Build the two lists
        self.bpbynumber.append(self)
        if (file, line) in self.bplist:
            self.bplist[file, line].append(self)
        else:
            self.bplist[file, line] = [self]


    def deleteMe(self):
        index = (self.file, self.line)
        self.bpbynumber[self.number] = None   # No longer in list
        self.bplist[index].remove(self)
        if not self.bplist[index]:
            # No more bp for this f:l combo
            del self.bplist[index]

    def enable(self):
        self.enabled = 1

    def disable(self):
        self.enabled = 0

    def bpprint(self, out=None):
        if out is None:
            out = sys.stdout
        if self.temporary:
            disp = 'del  '
        else:
            disp = 'keep '
        if self.enabled:
            disp = disp + 'yes  '
        else:
            disp = disp + 'no   '
        print >>out, '%-4dbreakpoint   %s at %s:%d' % (self.number, disp,
                                                       self.file, self.line)
        if self.cond:
            print >>out, '\tstop only if %s' % (self.cond,)
        if self.ignore:
            print >>out, '\tignore next %d hits' % (self.ignore)
        if (self.hits):
            if (self.hits > 1): ss = 's'
            else: ss = ''
            print >>out, ('\tbreakpoint already hit %d time%s' %
                          (self.hits, ss))

# -----------end of Breakpoint class----------

def checkfuncname(b, frame):
    """Check whether we should break here because of `b.funcname`."""
    if not b.funcname:
        # Breakpoint was set via line number.
        if b.line != frame.f_lineno:
            # Breakpoint was set at a line with a def statement and the function
            # defined is called: don't break.
            return False
        return True

    # Breakpoint set via function name.

    if frame.f_code.co_name != b.funcname:
        # It's not a function call, but rather execution of def statement.
        return False

    # We are in the right frame.
    if not b.func_first_executable_line:
        # The function is entered for the 1st time.
        b.func_first_executable_line = frame.f_lineno

    if b.func_first_executable_line != frame.f_lineno:
        # But we are not at the first line number: don't break.
        return False
    return True

# Determines if there is an effective (active) breakpoint at this
# line of code.  Returns breakpoint number or 0 if none
def effective(file, line, frame):
    """Determine which breakpoint for this file:line is to be acted upon.

    Called only if we know there is a bpt at this
    location.  Returns breakpoint that was triggered and a flag
    that indicates if it is ok to delete a temporary bp.

    """
    possibles = Breakpoint.bplist[file,line]
    for i in range(0, len(possibles)):
        b = possibles[i]
        if b.enabled == 0:
            continue
        if not checkfuncname(b, frame):
            continue
        # Count every hit when bp is enabled
        b.hits = b.hits + 1
        if not b.cond:
            # If unconditional, and ignoring,
            # go on to next, else break
            if b.ignore > 0:
                b.ignore = b.ignore -1
                continue
            else:
                # breakpoint and marker that's ok
                # to delete if temporary
                return (b,1)
        else:
            # Conditional bp.
            # Ignore count applies only to those bpt hits where the
            # condition evaluates to true.
            try:
                val = eval(b.cond, frame.f_globals,
                           frame.f_locals)
                if val:
                    if b.ignore > 0:
                        b.ignore = b.ignore -1
                        # continue
                    else:
                        return (b,1)
                # else:
                #   continue
            except:
                # if eval fails, most conservative
                # thing is to stop on breakpoint
                # regardless of ignore count.
                # Don't delete temporary,
                # as another hint to user.
                return (b,0)
    return (None, None)

# -------------------- testing --------------------

class Tdb(Bdb):
    def user_call(self, frame, args):
        name = frame.f_code.co_name
        if not name: name = '???'
        print '+++ call', name, args
    def user_line(self, frame):
        import linecache
        name = frame.f_code.co_name
        if not name: name = '???'
        fn = self.canonic(frame.f_code.co_filename)
        line = linecache.getline(fn, frame.f_lineno, frame.f_globals)
        print '+++', fn, frame.f_lineno, name, ':', line.strip()
    def user_return(self, frame, retval):
        print '+++ return', retval
    def user_exception(self, frame, exc_stuff):
        print '+++ exception', exc_stuff
        self.set_continue()

def foo(n):
    print 'foo(', n, ')'
    x = bar(n*10)
    print 'bar returned', x

def bar(a):
    print 'bar(', a, ')'
    return a/2

def test():
    t = Tdb()
    t.run('import bdb; bdb.foo(10)')

# end
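Beyond the Tdb demo above, the usual pattern is to subclass Bdb and override the user_* hooks; a minimal sketch (LineLogger and target are illustrative names, not part of the file):

    import bdb

    class LineLogger(bdb.Bdb):
        def user_line(self, frame):
            # called at every traceable line; report where we are and keep stepping
            print '>', frame.f_code.co_filename, frame.f_lineno
            self.set_step()

    def target():
        x = 1
        return x + 1

    LineLogger().runcall(target)    # runcall() traces a single function call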
Binary file not shown.
508
PythonHome/Lib/binhex.py
Normal file
@ -0,0 +1,508 @@
"""Macintosh binhex compression/decompression.

easy interface:
binhex(inputfilename, outputfilename)
hexbin(inputfilename, outputfilename)
"""

#
# Jack Jansen, CWI, August 1995.
#
# The module is supposed to be as compatible as possible. Especially the
# easy interface should work "as expected" on any platform.
# XXXX Note: currently, textfiles appear in mac-form on all platforms.
# We seem to lack a simple character-translate in python.
# (we should probably use ISO-Latin-1 on all but the mac platform).
# XXXX The simple routines are too simple: they expect to hold the complete
# files in-core. Should be fixed.
# XXXX It would be nice to handle AppleDouble format on unix
# (for servers serving macs).
# XXXX I don't understand what happens when you get 0x90 times the same byte on
# input. The resulting code (xx 90 90) would appear to be interpreted as an
# escaped *value* of 0x90. All coders I've seen appear to ignore this nicety...
#
import sys
import os
import struct
import binascii

__all__ = ["binhex","hexbin","Error"]

class Error(Exception):
    pass

# States (what have we written)
[_DID_HEADER, _DID_DATA, _DID_RSRC] = range(3)

# Various constants
REASONABLY_LARGE=32768  # Minimal amount we pass the rle-coder
LINELEN=64
RUNCHAR=chr(0x90)   # run-length introducer

#
# This code is no longer byte-order dependent

#
# Workarounds for non-mac machines.
try:
    from Carbon.File import FSSpec, FInfo
    from MacOS import openrf

    def getfileinfo(name):
        finfo = FSSpec(name).FSpGetFInfo()
        dir, file = os.path.split(name)
        # XXX Get resource/data sizes
        fp = open(name, 'rb')
        fp.seek(0, 2)
        dlen = fp.tell()
        fp = openrf(name, '*rb')
        fp.seek(0, 2)
        rlen = fp.tell()
        return file, finfo, dlen, rlen

    def openrsrc(name, *mode):
        if not mode:
            mode = '*rb'
        else:
            mode = '*' + mode[0]
        return openrf(name, mode)

except ImportError:
    #
    # Glue code for non-macintosh usage
    #

    class FInfo:
        def __init__(self):
            self.Type = '????'
            self.Creator = '????'
            self.Flags = 0

    def getfileinfo(name):
        finfo = FInfo()
        # Quick check for textfile
        fp = open(name)
        data = open(name).read(256)
        for c in data:
            if not c.isspace() and (c<' ' or ord(c) > 0x7f):
                break
        else:
            finfo.Type = 'TEXT'
        fp.seek(0, 2)
        dsize = fp.tell()
        fp.close()
        dir, file = os.path.split(name)
        file = file.replace(':', '-', 1)
        return file, finfo, dsize, 0

    class openrsrc:
        def __init__(self, *args):
            pass

        def read(self, *args):
            return ''

        def write(self, *args):
            pass

        def close(self):
            pass

class _Hqxcoderengine:
    """Write data to the coder in 3-byte chunks"""

    def __init__(self, ofp):
        self.ofp = ofp
        self.data = ''
        self.hqxdata = ''
        self.linelen = LINELEN-1

    def write(self, data):
        self.data = self.data + data
        datalen = len(self.data)
        todo = (datalen//3)*3
        data = self.data[:todo]
        self.data = self.data[todo:]
        if not data:
            return
        self.hqxdata = self.hqxdata + binascii.b2a_hqx(data)
        self._flush(0)

    def _flush(self, force):
        first = 0
        while first <= len(self.hqxdata)-self.linelen:
            last = first + self.linelen
            self.ofp.write(self.hqxdata[first:last]+'\n')
            self.linelen = LINELEN
            first = last
        self.hqxdata = self.hqxdata[first:]
        if force:
            self.ofp.write(self.hqxdata + ':\n')

    def close(self):
        if self.data:
            self.hqxdata = \
                 self.hqxdata + binascii.b2a_hqx(self.data)
        self._flush(1)
        self.ofp.close()
        del self.ofp

class _Rlecoderengine:
    """Write data to the RLE-coder in suitably large chunks"""

    def __init__(self, ofp):
        self.ofp = ofp
        self.data = ''

    def write(self, data):
        self.data = self.data + data
        if len(self.data) < REASONABLY_LARGE:
            return
        rledata = binascii.rlecode_hqx(self.data)
        self.ofp.write(rledata)
        self.data = ''

    def close(self):
        if self.data:
            rledata = binascii.rlecode_hqx(self.data)
            self.ofp.write(rledata)
        self.ofp.close()
        del self.ofp

class BinHex:
    def __init__(self, name_finfo_dlen_rlen, ofp):
        name, finfo, dlen, rlen = name_finfo_dlen_rlen
        if type(ofp) == type(''):
            ofname = ofp
            ofp = open(ofname, 'w')
        ofp.write('(This file must be converted with BinHex 4.0)\n\n:')
        hqxer = _Hqxcoderengine(ofp)
        self.ofp = _Rlecoderengine(hqxer)
        self.crc = 0
        if finfo is None:
            finfo = FInfo()
        self.dlen = dlen
        self.rlen = rlen
        self._writeinfo(name, finfo)
        self.state = _DID_HEADER

    def _writeinfo(self, name, finfo):
        nl = len(name)
        if nl > 63:
            raise Error, 'Filename too long'
        d = chr(nl) + name + '\0'
        d2 = finfo.Type + finfo.Creator

        # Force all structs to be packed with big-endian
        d3 = struct.pack('>h', finfo.Flags)
        d4 = struct.pack('>ii', self.dlen, self.rlen)
        info = d + d2 + d3 + d4
        self._write(info)
        self._writecrc()

    def _write(self, data):
        self.crc = binascii.crc_hqx(data, self.crc)
        self.ofp.write(data)

    def _writecrc(self):
        # XXXX Should this be here??
        # self.crc = binascii.crc_hqx('\0\0', self.crc)
        if self.crc < 0:
            fmt = '>h'
        else:
            fmt = '>H'
        self.ofp.write(struct.pack(fmt, self.crc))
        self.crc = 0

    def write(self, data):
        if self.state != _DID_HEADER:
            raise Error, 'Writing data at the wrong time'
        self.dlen = self.dlen - len(data)
        self._write(data)

    def close_data(self):
        if self.dlen != 0:
            raise Error, 'Incorrect data size, diff=%r' % (self.rlen,)
        self._writecrc()
        self.state = _DID_DATA

    def write_rsrc(self, data):
        if self.state < _DID_DATA:
            self.close_data()
        if self.state != _DID_DATA:
            raise Error, 'Writing resource data at the wrong time'
        self.rlen = self.rlen - len(data)
        self._write(data)

    def close(self):
        if self.state < _DID_DATA:
            self.close_data()
        if self.state != _DID_DATA:
            raise Error, 'Close at the wrong time'
        if self.rlen != 0:
            raise Error, \
                  "Incorrect resource-datasize, diff=%r" % (self.rlen,)
        self._writecrc()
        self.ofp.close()
        self.state = None
        del self.ofp

def binhex(inp, out):
    """(infilename, outfilename) - Create binhex-encoded copy of a file"""
    finfo = getfileinfo(inp)
    ofp = BinHex(finfo, out)

    ifp = open(inp, 'rb')
    # XXXX Do textfile translation on non-mac systems
    while 1:
        d = ifp.read(128000)
        if not d: break
        ofp.write(d)
    ofp.close_data()
    ifp.close()

    ifp = openrsrc(inp, 'rb')
    while 1:
        d = ifp.read(128000)
        if not d: break
        ofp.write_rsrc(d)
    ofp.close()
    ifp.close()

class _Hqxdecoderengine:
    """Read data via the decoder in 4-byte chunks"""

    def __init__(self, ifp):
        self.ifp = ifp
        self.eof = 0

    def read(self, totalwtd):
        """Read at least wtd bytes (or until EOF)"""
        decdata = ''
        wtd = totalwtd
        #
        # The loop here is convoluted, since we don't really now how
        # much to decode: there may be newlines in the incoming data.
        while wtd > 0:
            if self.eof: return decdata
            wtd = ((wtd+2)//3)*4
            data = self.ifp.read(wtd)
            #
            # Next problem: there may not be a complete number of
            # bytes in what we pass to a2b. Solve by yet another
            # loop.
            #
            while 1:
                try:
                    decdatacur, self.eof = \
                            binascii.a2b_hqx(data)
                    break
                except binascii.Incomplete:
                    pass
                newdata = self.ifp.read(1)
                if not newdata:
                    raise Error, \
                          'Premature EOF on binhex file'
                data = data + newdata
            decdata = decdata + decdatacur
            wtd = totalwtd - len(decdata)
            if not decdata and not self.eof:
                raise Error, 'Premature EOF on binhex file'
        return decdata

    def close(self):
        self.ifp.close()

class _Rledecoderengine:
    """Read data via the RLE-coder"""

    def __init__(self, ifp):
        self.ifp = ifp
        self.pre_buffer = ''
        self.post_buffer = ''
        self.eof = 0

    def read(self, wtd):
        if wtd > len(self.post_buffer):
            self._fill(wtd-len(self.post_buffer))
        rv = self.post_buffer[:wtd]
        self.post_buffer = self.post_buffer[wtd:]
        return rv

    def _fill(self, wtd):
        self.pre_buffer = self.pre_buffer + self.ifp.read(wtd+4)
        if self.ifp.eof:
            self.post_buffer = self.post_buffer + \
                binascii.rledecode_hqx(self.pre_buffer)
            self.pre_buffer = ''
            return

        #
        # Obfuscated code ahead. We have to take care that we don't
        # end up with an orphaned RUNCHAR later on. So, we keep a couple
        # of bytes in the buffer, depending on what the end of
        # the buffer looks like:
        # '\220\0\220' - Keep 3 bytes: repeated \220 (escaped as \220\0)
        # '?\220' - Keep 2 bytes: repeated something-else
        # '\220\0' - Escaped \220: Keep 2 bytes.
        # '?\220?' - Complete repeat sequence: decode all
        # otherwise: keep 1 byte.
        #
        mark = len(self.pre_buffer)
        if self.pre_buffer[-3:] == RUNCHAR + '\0' + RUNCHAR:
            mark = mark - 3
        elif self.pre_buffer[-1] == RUNCHAR:
            mark = mark - 2
        elif self.pre_buffer[-2:] == RUNCHAR + '\0':
            mark = mark - 2
        elif self.pre_buffer[-2] == RUNCHAR:
            pass # Decode all
        else:
            mark = mark - 1

        self.post_buffer = self.post_buffer + \
            binascii.rledecode_hqx(self.pre_buffer[:mark])
        self.pre_buffer = self.pre_buffer[mark:]

    def close(self):
        self.ifp.close()

class HexBin:
    def __init__(self, ifp):
        if type(ifp) == type(''):
            ifp = open(ifp)
        #
        # Find initial colon.
        #
        while 1:
            ch = ifp.read(1)
            if not ch:
                raise Error, "No binhex data found"
            # Cater for \r\n terminated lines (which show up as \n\r, hence
            # all lines start with \r)
            if ch == '\r':
                continue
            if ch == ':':
                break
            if ch != '\n':
                dummy = ifp.readline()

        hqxifp = _Hqxdecoderengine(ifp)
        self.ifp = _Rledecoderengine(hqxifp)
        self.crc = 0
        self._readheader()

    def _read(self, len):
        data = self.ifp.read(len)
        self.crc = binascii.crc_hqx(data, self.crc)
        return data

    def _checkcrc(self):
        filecrc = struct.unpack('>h', self.ifp.read(2))[0] & 0xffff
        #self.crc = binascii.crc_hqx('\0\0', self.crc)
        # XXXX Is this needed??
        self.crc = self.crc & 0xffff
        if filecrc != self.crc:
            raise Error, 'CRC error, computed %x, read %x' \
                  %(self.crc, filecrc)
        self.crc = 0

    def _readheader(self):
        len = self._read(1)
        fname = self._read(ord(len))
        rest = self._read(1+4+4+2+4+4)
        self._checkcrc()

        type = rest[1:5]
        creator = rest[5:9]
        flags = struct.unpack('>h', rest[9:11])[0]
        self.dlen = struct.unpack('>l', rest[11:15])[0]
        self.rlen = struct.unpack('>l', rest[15:19])[0]

        self.FName = fname
        self.FInfo = FInfo()
        self.FInfo.Creator = creator
        self.FInfo.Type = type
        self.FInfo.Flags = flags

        self.state = _DID_HEADER

    def read(self, *n):
        if self.state != _DID_HEADER:
            raise Error, 'Read data at wrong time'
        if n:
            n = n[0]
            n = min(n, self.dlen)
        else:
            n = self.dlen
        rv = ''
        while len(rv) < n:
            rv = rv + self._read(n-len(rv))
        self.dlen = self.dlen - n
        return rv

    def close_data(self):
        if self.state != _DID_HEADER:
            raise Error, 'close_data at wrong time'
        if self.dlen:
            dummy = self._read(self.dlen)
        self._checkcrc()
        self.state = _DID_DATA

    def read_rsrc(self, *n):
        if self.state == _DID_HEADER:
            self.close_data()
        if self.state != _DID_DATA:
            raise Error, 'Read resource data at wrong time'
        if n:
            n = n[0]
            n = min(n, self.rlen)
        else:
            n = self.rlen
        self.rlen = self.rlen - n
        return self._read(n)

    def close(self):
        if self.rlen:
            dummy = self.read_rsrc(self.rlen)
        self._checkcrc()
        self.state = _DID_RSRC
        self.ifp.close()

def hexbin(inp, out):
    """(infilename, outfilename) - Decode binhexed file"""
    ifp = HexBin(inp)
    finfo = ifp.FInfo
    if not out:
        out = ifp.FName

    ofp = open(out, 'wb')
    # XXXX Do translation on non-mac systems
    while 1:
        d = ifp.read(128000)
        if not d: break
        ofp.write(d)
    ofp.close()
    ifp.close_data()

    d = ifp.read_rsrc(128000)
    if d:
        ofp = openrsrc(out, 'wb')
        ofp.write(d)
        while 1:
            d = ifp.read_rsrc(128000)
            if not d: break
            ofp.write(d)
        ofp.close()

    ifp.close()

def _test():
    fname = sys.argv[1]
    binhex(fname, fname+'.hqx')
    hexbin(fname+'.hqx', fname+'.viahqx')
    #hexbin(fname, fname+'.unpacked')
    sys.exit(1)

if __name__ == '__main__':
    _test()
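The _test() hook above exercises the full round trip; day to day, only the two easy-interface calls are needed. A sketch with placeholder file names:

    import binhex

    binhex.binhex('report.txt', 'report.txt.hqx')        # encode a file as BinHex 4.0
    binhex.hexbin('report.txt.hqx', 'report.roundtrip')  # decode it back out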
Binary file not shown.
92
PythonHome/Lib/bisect.py
Normal file
@ -0,0 +1,92 @@
"""Bisection algorithms."""

def insort_right(a, x, lo=0, hi=None):
    """Insert item x in list a, and keep it sorted assuming a is sorted.

    If x is already in a, insert it to the right of the rightmost x.

    Optional args lo (default 0) and hi (default len(a)) bound the
    slice of a to be searched.
    """

    if lo < 0:
        raise ValueError('lo must be non-negative')
    if hi is None:
        hi = len(a)
    while lo < hi:
        mid = (lo+hi)//2
        if x < a[mid]: hi = mid
        else: lo = mid+1
    a.insert(lo, x)

insort = insort_right   # backward compatibility

def bisect_right(a, x, lo=0, hi=None):
    """Return the index where to insert item x in list a, assuming a is sorted.

    The return value i is such that all e in a[:i] have e <= x, and all e in
    a[i:] have e > x.  So if x already appears in the list, a.insert(x) will
    insert just after the rightmost x already there.

    Optional args lo (default 0) and hi (default len(a)) bound the
    slice of a to be searched.
    """

    if lo < 0:
        raise ValueError('lo must be non-negative')
    if hi is None:
        hi = len(a)
    while lo < hi:
        mid = (lo+hi)//2
        if x < a[mid]: hi = mid
        else: lo = mid+1
    return lo

bisect = bisect_right   # backward compatibility

def insort_left(a, x, lo=0, hi=None):
    """Insert item x in list a, and keep it sorted assuming a is sorted.

    If x is already in a, insert it to the left of the leftmost x.

    Optional args lo (default 0) and hi (default len(a)) bound the
    slice of a to be searched.
    """

    if lo < 0:
        raise ValueError('lo must be non-negative')
    if hi is None:
        hi = len(a)
    while lo < hi:
        mid = (lo+hi)//2
        if a[mid] < x: lo = mid+1
        else: hi = mid
    a.insert(lo, x)


def bisect_left(a, x, lo=0, hi=None):
    """Return the index where to insert item x in list a, assuming a is sorted.

    The return value i is such that all e in a[:i] have e < x, and all e in
    a[i:] have e >= x.  So if x already appears in the list, a.insert(x) will
    insert just before the leftmost x already there.

    Optional args lo (default 0) and hi (default len(a)) bound the
    slice of a to be searched.
    """

    if lo < 0:
        raise ValueError('lo must be non-negative')
    if hi is None:
        hi = len(a)
    while lo < hi:
        mid = (lo+hi)//2
        if a[mid] < x: lo = mid+1
        else: hi = mid
    return lo

# Overwrite above definitions with a fast C implementation
try:
    from _bisect import *
except ImportError:
    pass
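The left/right distinction above only matters when x is already present; a quick hedged illustration (not part of the commit):

    # bisect_left vs bisect_right on a list with duplicates (Python 2).
    import bisect

    a = [1, 2, 4, 4, 8]
    print bisect.bisect_left(a, 4)    # 2: index of the leftmost 4
    print bisect.bisect_right(a, 4)   # 4: index just past the rightmost 4
    bisect.insort(a, 3)               # insort is the insort_right alias
    print a                           # [1, 2, 3, 4, 4, 8]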
Binary file not shown.
455 PythonHome/Lib/bsddb/__init__.py Normal file
@@ -0,0 +1,455 @@
#----------------------------------------------------------------------
# Copyright (c) 1999-2001, Digital Creations, Fredericksburg, VA, USA
# and Andrew Kuchling. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#   o Redistributions of source code must retain the above copyright
#     notice, this list of conditions, and the disclaimer that follows.
#
#   o Redistributions in binary form must reproduce the above copyright
#     notice, this list of conditions, and the following disclaimer in
#     the documentation and/or other materials provided with the
#     distribution.
#
#   o Neither the name of Digital Creations nor the names of its
#     contributors may be used to endorse or promote products derived
#     from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY DIGITAL CREATIONS AND CONTRIBUTORS *AS
# IS* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL DIGITAL
# CREATIONS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
#----------------------------------------------------------------------


"""Support for Berkeley DB 4.3 through 5.3 with a simple interface.

For the full featured object oriented interface use the bsddb.db module
instead.  It mirrors the Oracle Berkeley DB C API.
"""

import sys
absolute_import = (sys.version_info[0] >= 3)

if (sys.version_info >= (2, 6)) and (sys.version_info < (3, 0)) :
    import warnings
    if sys.py3kwarning and (__name__ != 'bsddb3') :
        warnings.warnpy3k("in 3.x, the bsddb module has been removed; "
                          "please use the pybsddb project instead",
                          DeprecationWarning, 2)
    warnings.filterwarnings("ignore", ".*CObject.*", DeprecationWarning,
                            "bsddb.__init__")

try:
    if __name__ == 'bsddb3':
        # import _pybsddb binary as it should be the more recent version from
        # a standalone pybsddb addon package than the version included with
        # python as bsddb._bsddb.
        if absolute_import :
            # Because this syntax is not valid before Python 2.5
            exec("from . import _pybsddb")
        else :
            import _pybsddb
        _bsddb = _pybsddb
        from bsddb3.dbutils import DeadlockWrap as _DeadlockWrap
    else:
        import _bsddb
        from bsddb.dbutils import DeadlockWrap as _DeadlockWrap
except ImportError:
    # Remove ourselves from sys.modules
    import sys
    del sys.modules[__name__]
    raise

# bsddb3 calls it db, but provide _db for backwards compatibility
db = _db = _bsddb
__version__ = db.__version__

error = db.DBError  # So bsddb.error will mean something...

#----------------------------------------------------------------------

import sys, os

from weakref import ref

if sys.version_info < (2, 6) :
    import UserDict
    MutableMapping = UserDict.DictMixin
else :
    import collections
    MutableMapping = collections.MutableMapping

class _iter_mixin(MutableMapping):
    def _make_iter_cursor(self):
        cur = _DeadlockWrap(self.db.cursor)
        key = id(cur)
        self._cursor_refs[key] = ref(cur, self._gen_cref_cleaner(key))
        return cur

    def _gen_cref_cleaner(self, key):
        # we generate the function for the weakref callback here
        # to ensure that we do not hold a strict reference to cur
        # in the callback.
        return lambda ref: self._cursor_refs.pop(key, None)

    def __iter__(self):
        self._kill_iteration = False
        self._in_iter += 1
        try:
            try:
                cur = self._make_iter_cursor()

                # FIXME-20031102-greg: race condition.  cursor could
                # be closed by another thread before this call.

                # since we're only returning keys, we call the cursor
                # methods with flags=0, dlen=0, dofs=0
                key = _DeadlockWrap(cur.first, 0,0,0)[0]
                yield key

                next = getattr(cur, "next")
                while 1:
                    try:
                        key = _DeadlockWrap(next, 0,0,0)[0]
                        yield key
                    except _bsddb.DBCursorClosedError:
                        if self._kill_iteration:
                            raise RuntimeError('Database changed size '
                                               'during iteration.')
                        cur = self._make_iter_cursor()
                        # FIXME-20031101-greg: race condition.  cursor could
                        # be closed by another thread before this call.
                        _DeadlockWrap(cur.set, key,0,0,0)
                        next = getattr(cur, "next")
            except _bsddb.DBNotFoundError:
                pass
            except _bsddb.DBCursorClosedError:
                # the database was modified during iteration.  abort.
                pass
        # When Python 2.4 not supported in bsddb3, we can change this to "finally"
        except :
            self._in_iter -= 1
            raise

        self._in_iter -= 1

    def iteritems(self):
        if not self.db:
            return
        self._kill_iteration = False
        self._in_iter += 1
        try:
            try:
                cur = self._make_iter_cursor()

                # FIXME-20031102-greg: race condition.  cursor could
                # be closed by another thread before this call.

                kv = _DeadlockWrap(cur.first)
                key = kv[0]
                yield kv

                next = getattr(cur, "next")
                while 1:
                    try:
                        kv = _DeadlockWrap(next)
                        key = kv[0]
                        yield kv
                    except _bsddb.DBCursorClosedError:
                        if self._kill_iteration:
                            raise RuntimeError('Database changed size '
                                               'during iteration.')
                        cur = self._make_iter_cursor()
                        # FIXME-20031101-greg: race condition.  cursor could
                        # be closed by another thread before this call.
                        _DeadlockWrap(cur.set, key,0,0,0)
                        next = getattr(cur, "next")
            except _bsddb.DBNotFoundError:
                pass
            except _bsddb.DBCursorClosedError:
                # the database was modified during iteration.  abort.
                pass
        # When Python 2.4 not supported in bsddb3, we can change this to "finally"
        except :
            self._in_iter -= 1
            raise

        self._in_iter -= 1


class _DBWithCursor(_iter_mixin):
    """
    A simple wrapper around DB that makes it look like the bsddbobject in
    the old module.  It uses a cursor as needed to provide DB traversal.
    """
    def __init__(self, db):
        self.db = db
        self.db.set_get_returns_none(0)

        # FIXME-20031101-greg: I believe there is still the potential
        # for deadlocks in a multithreaded environment if someone
        # attempts to use the any of the cursor interfaces in one
        # thread while doing a put or delete in another thread.  The
        # reason is that _checkCursor and _closeCursors are not atomic
        # operations.  Doing our own locking around self.dbc,
        # self.saved_dbc_key and self._cursor_refs could prevent this.
        # TODO: A test case demonstrating the problem needs to be written.

        # self.dbc is a DBCursor object used to implement the
        # first/next/previous/last/set_location methods.
        self.dbc = None
        self.saved_dbc_key = None

        # a collection of all DBCursor objects currently allocated
        # by the _iter_mixin interface.
        self._cursor_refs = {}
        self._in_iter = 0
        self._kill_iteration = False

    def __del__(self):
        self.close()

    def _checkCursor(self):
        if self.dbc is None:
            self.dbc = _DeadlockWrap(self.db.cursor)
            if self.saved_dbc_key is not None:
                _DeadlockWrap(self.dbc.set, self.saved_dbc_key)
                self.saved_dbc_key = None

    # This method is needed for all non-cursor DB calls to avoid
    # Berkeley DB deadlocks (due to being opened with DB_INIT_LOCK
    # and DB_THREAD to be thread safe) when intermixing database
    # operations that use the cursor internally with those that don't.
    def _closeCursors(self, save=1):
        if self.dbc:
            c = self.dbc
            self.dbc = None
            if save:
                try:
                    self.saved_dbc_key = _DeadlockWrap(c.current, 0,0,0)[0]
                except db.DBError:
                    pass
            _DeadlockWrap(c.close)
            del c
        for cref in self._cursor_refs.values():
            c = cref()
            if c is not None:
                _DeadlockWrap(c.close)

    def _checkOpen(self):
        if self.db is None:
            raise error, "BSDDB object has already been closed"

    def isOpen(self):
        return self.db is not None

    def __len__(self):
        self._checkOpen()
        return _DeadlockWrap(lambda: len(self.db))  # len(self.db)

    if sys.version_info >= (2, 6) :
        def __repr__(self) :
            if self.isOpen() :
                return repr(dict(_DeadlockWrap(self.db.items)))
            return repr(dict())

    def __getitem__(self, key):
        self._checkOpen()
        return _DeadlockWrap(lambda: self.db[key])  # self.db[key]

    def __setitem__(self, key, value):
        self._checkOpen()
        self._closeCursors()
        if self._in_iter and key not in self:
            self._kill_iteration = True
        def wrapF():
            self.db[key] = value
        _DeadlockWrap(wrapF)  # self.db[key] = value

    def __delitem__(self, key):
        self._checkOpen()
        self._closeCursors()
        if self._in_iter and key in self:
            self._kill_iteration = True
        def wrapF():
            del self.db[key]
        _DeadlockWrap(wrapF)  # del self.db[key]

    def close(self):
        self._closeCursors(save=0)
        if self.dbc is not None:
            _DeadlockWrap(self.dbc.close)
        v = 0
        if self.db is not None:
            v = _DeadlockWrap(self.db.close)
        self.dbc = None
        self.db = None
        return v

    def keys(self):
        self._checkOpen()
        return _DeadlockWrap(self.db.keys)

    def has_key(self, key):
        self._checkOpen()
        return _DeadlockWrap(self.db.has_key, key)

    def set_location(self, key):
        self._checkOpen()
        self._checkCursor()
        return _DeadlockWrap(self.dbc.set_range, key)

    def next(self):  # Renamed by "2to3"
        self._checkOpen()
        self._checkCursor()
        rv = _DeadlockWrap(getattr(self.dbc, "next"))
        return rv

    if sys.version_info[0] >= 3 :  # For "2to3" conversion
        next = __next__

    def previous(self):
        self._checkOpen()
        self._checkCursor()
        rv = _DeadlockWrap(self.dbc.prev)
        return rv

    def first(self):
        self._checkOpen()
        # fix 1725856: don't needlessly try to restore our cursor position
        self.saved_dbc_key = None
        self._checkCursor()
        rv = _DeadlockWrap(self.dbc.first)
        return rv

    def last(self):
        self._checkOpen()
        # fix 1725856: don't needlessly try to restore our cursor position
        self.saved_dbc_key = None
        self._checkCursor()
        rv = _DeadlockWrap(self.dbc.last)
        return rv

    def sync(self):
        self._checkOpen()
        return _DeadlockWrap(self.db.sync)


#----------------------------------------------------------------------
# Compatibility object factory functions

def hashopen(file, flag='c', mode=0666, pgsize=None, ffactor=None, nelem=None,
             cachesize=None, lorder=None, hflags=0):

    flags = _checkflag(flag, file)
    e = _openDBEnv(cachesize)
    d = db.DB(e)
    d.set_flags(hflags)
    if pgsize is not None: d.set_pagesize(pgsize)
    if lorder is not None: d.set_lorder(lorder)
    if ffactor is not None: d.set_h_ffactor(ffactor)
    if nelem is not None: d.set_h_nelem(nelem)
    d.open(file, db.DB_HASH, flags, mode)
    return _DBWithCursor(d)

#----------------------------------------------------------------------

def btopen(file, flag='c', mode=0666,
           btflags=0, cachesize=None, maxkeypage=None, minkeypage=None,
           pgsize=None, lorder=None):

    flags = _checkflag(flag, file)
    e = _openDBEnv(cachesize)
    d = db.DB(e)
    if pgsize is not None: d.set_pagesize(pgsize)
    if lorder is not None: d.set_lorder(lorder)
    d.set_flags(btflags)
    if minkeypage is not None: d.set_bt_minkey(minkeypage)
    if maxkeypage is not None: d.set_bt_maxkey(maxkeypage)
    d.open(file, db.DB_BTREE, flags, mode)
    return _DBWithCursor(d)

#----------------------------------------------------------------------


def rnopen(file, flag='c', mode=0666,
           rnflags=0, cachesize=None, pgsize=None, lorder=None,
           rlen=None, delim=None, source=None, pad=None):

    flags = _checkflag(flag, file)
    e = _openDBEnv(cachesize)
    d = db.DB(e)
    if pgsize is not None: d.set_pagesize(pgsize)
    if lorder is not None: d.set_lorder(lorder)
    d.set_flags(rnflags)
    if delim is not None: d.set_re_delim(delim)
    if rlen is not None: d.set_re_len(rlen)
    if source is not None: d.set_re_source(source)
    if pad is not None: d.set_re_pad(pad)
    d.open(file, db.DB_RECNO, flags, mode)
    return _DBWithCursor(d)

#----------------------------------------------------------------------

def _openDBEnv(cachesize):
    e = db.DBEnv()
    if cachesize is not None:
        if cachesize >= 20480:
            e.set_cachesize(0, cachesize)
        else:
            raise error, "cachesize must be >= 20480"
    e.set_lk_detect(db.DB_LOCK_DEFAULT)
    e.open('.', db.DB_PRIVATE | db.DB_CREATE | db.DB_THREAD | db.DB_INIT_LOCK | db.DB_INIT_MPOOL)
    return e

def _checkflag(flag, file):
    if flag == 'r':
        flags = db.DB_RDONLY
    elif flag == 'rw':
        flags = 0
    elif flag == 'w':
        flags = db.DB_CREATE
    elif flag == 'c':
        flags = db.DB_CREATE
    elif flag == 'n':
        flags = db.DB_CREATE
        #flags = db.DB_CREATE | db.DB_TRUNCATE
        # we used db.DB_TRUNCATE flag for this before but Berkeley DB
        # 4.2.52 changed to disallow truncate with txn environments.
        if file is not None and os.path.isfile(file):
            os.unlink(file)
    else:
        raise error, "flags should be one of 'r', 'w', 'c' or 'n'"
    return flags | db.DB_THREAD

#----------------------------------------------------------------------


# This is a silly little hack that allows apps to continue to use the
# DB_THREAD flag even on systems without threads without freaking out
# Berkeley DB.
#
# This assumes that if Python was built with thread support then
# Berkeley DB was too.

try:
    # 2to3 automatically changes "import thread" to "import _thread"
    import thread as T
    del T

except ImportError:
    db.DB_THREAD = 0

#----------------------------------------------------------------------
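The factory functions above return _DBWithCursor, which behaves like a dict of byte strings; a minimal sketch of typical use (Python 2 only, and 'demo.db' is a hypothetical file created alongside the private environment in the working directory):

    # Sketch: the legacy dict-like bsddb interface.
    import bsddb

    d = bsddb.hashopen('demo.db', 'c')   # 'c': create if missing
    d['key'] = 'value'                   # keys and values must be byte strings
    print d.has_key('key')               # True
    print d['key']                       # 'value'
    d.close()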
Binary file not shown.
60 PythonHome/Lib/bsddb/db.py Normal file
@@ -0,0 +1,60 @@
#----------------------------------------------------------------------
# Copyright (c) 1999-2001, Digital Creations, Fredericksburg, VA, USA
# and Andrew Kuchling. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#   o Redistributions of source code must retain the above copyright
#     notice, this list of conditions, and the disclaimer that follows.
#
#   o Redistributions in binary form must reproduce the above copyright
#     notice, this list of conditions, and the following disclaimer in
#     the documentation and/or other materials provided with the
#     distribution.
#
#   o Neither the name of Digital Creations nor the names of its
#     contributors may be used to endorse or promote products derived
#     from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY DIGITAL CREATIONS AND CONTRIBUTORS *AS
# IS* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL DIGITAL
# CREATIONS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
#----------------------------------------------------------------------


# This module is just a placeholder for possible future expansion, in
# case we ever want to augment the stuff in _db in any way.  For now
# it simply imports everything from _db.

import sys
absolute_import = (sys.version_info[0] >= 3)

if not absolute_import :
    if __name__.startswith('bsddb3.') :
        # import _pybsddb binary as it should be the more recent version from
        # a standalone pybsddb addon package than the version included with
        # python as bsddb._bsddb.
        from _pybsddb import *
        from _pybsddb import __version__
    else:
        from _bsddb import *
        from _bsddb import __version__
else :
    # Because this syntax is not valid before Python 2.5
    if __name__.startswith('bsddb3.') :
        exec("from ._pybsddb import *")
        exec("from ._pybsddb import __version__")
    else :
        exec("from ._bsddb import *")
        exec("from ._bsddb import __version__")
Binary file not shown.
266 PythonHome/Lib/bsddb/dbobj.py Normal file
@@ -0,0 +1,266 @@
#-------------------------------------------------------------------------
#  This file contains real Python object wrappers for DB and DBEnv
#  C "objects" that can be usefully subclassed.  The previous SWIG
#  based interface allowed this thanks to SWIG's shadow classes.
#   --  Gregory P. Smith
#-------------------------------------------------------------------------
#
# (C) Copyright 2001  Autonomous Zone Industries
#
# License:  This is free software.  You may use this software for any
#           purpose including modification/redistribution, so long as
#           this header remains intact and that you do not claim any
#           rights of ownership or authorship of this software.  This
#           software has been tested, but no warranty is expressed or
#           implied.
#

#
# TODO it would be *really nice* to have an automatic shadow class populator
# so that new methods don't need to be added here manually after being
# added to _bsddb.c.
#

import sys
absolute_import = (sys.version_info[0] >= 3)
if absolute_import :
    # Because this syntax is not valid before Python 2.5
    exec("from . import db")
else :
    import db

if sys.version_info < (2, 6) :
    from UserDict import DictMixin as MutableMapping
else :
    import collections
    MutableMapping = collections.MutableMapping

class DBEnv:
    def __init__(self, *args, **kwargs):
        self._cobj = db.DBEnv(*args, **kwargs)

    def close(self, *args, **kwargs):
        return self._cobj.close(*args, **kwargs)
    def open(self, *args, **kwargs):
        return self._cobj.open(*args, **kwargs)
    def remove(self, *args, **kwargs):
        return self._cobj.remove(*args, **kwargs)
    def set_shm_key(self, *args, **kwargs):
        return self._cobj.set_shm_key(*args, **kwargs)
    def set_cachesize(self, *args, **kwargs):
        return self._cobj.set_cachesize(*args, **kwargs)
    def set_data_dir(self, *args, **kwargs):
        return self._cobj.set_data_dir(*args, **kwargs)
    def set_flags(self, *args, **kwargs):
        return self._cobj.set_flags(*args, **kwargs)
    def set_lg_bsize(self, *args, **kwargs):
        return self._cobj.set_lg_bsize(*args, **kwargs)
    def set_lg_dir(self, *args, **kwargs):
        return self._cobj.set_lg_dir(*args, **kwargs)
    def set_lg_max(self, *args, **kwargs):
        return self._cobj.set_lg_max(*args, **kwargs)
    def set_lk_detect(self, *args, **kwargs):
        return self._cobj.set_lk_detect(*args, **kwargs)
    if db.version() < (4,5):
        def set_lk_max(self, *args, **kwargs):
            return self._cobj.set_lk_max(*args, **kwargs)
    def set_lk_max_locks(self, *args, **kwargs):
        return self._cobj.set_lk_max_locks(*args, **kwargs)
    def set_lk_max_lockers(self, *args, **kwargs):
        return self._cobj.set_lk_max_lockers(*args, **kwargs)
    def set_lk_max_objects(self, *args, **kwargs):
        return self._cobj.set_lk_max_objects(*args, **kwargs)
    def set_mp_mmapsize(self, *args, **kwargs):
        return self._cobj.set_mp_mmapsize(*args, **kwargs)
    def set_timeout(self, *args, **kwargs):
        return self._cobj.set_timeout(*args, **kwargs)
    def set_tmp_dir(self, *args, **kwargs):
        return self._cobj.set_tmp_dir(*args, **kwargs)
    def txn_begin(self, *args, **kwargs):
        return self._cobj.txn_begin(*args, **kwargs)
    def txn_checkpoint(self, *args, **kwargs):
        return self._cobj.txn_checkpoint(*args, **kwargs)
    def txn_stat(self, *args, **kwargs):
        return self._cobj.txn_stat(*args, **kwargs)
    def set_tx_max(self, *args, **kwargs):
        return self._cobj.set_tx_max(*args, **kwargs)
    def set_tx_timestamp(self, *args, **kwargs):
        return self._cobj.set_tx_timestamp(*args, **kwargs)
    def lock_detect(self, *args, **kwargs):
        return self._cobj.lock_detect(*args, **kwargs)
    def lock_get(self, *args, **kwargs):
        return self._cobj.lock_get(*args, **kwargs)
    def lock_id(self, *args, **kwargs):
        return self._cobj.lock_id(*args, **kwargs)
    def lock_put(self, *args, **kwargs):
        return self._cobj.lock_put(*args, **kwargs)
    def lock_stat(self, *args, **kwargs):
        return self._cobj.lock_stat(*args, **kwargs)
    def log_archive(self, *args, **kwargs):
        return self._cobj.log_archive(*args, **kwargs)

    def set_get_returns_none(self, *args, **kwargs):
        return self._cobj.set_get_returns_none(*args, **kwargs)

    def log_stat(self, *args, **kwargs):
        return self._cobj.log_stat(*args, **kwargs)

    def dbremove(self, *args, **kwargs):
        return self._cobj.dbremove(*args, **kwargs)
    def dbrename(self, *args, **kwargs):
        return self._cobj.dbrename(*args, **kwargs)
    def set_encrypt(self, *args, **kwargs):
        return self._cobj.set_encrypt(*args, **kwargs)

    if db.version() >= (4,4):
        def fileid_reset(self, *args, **kwargs):
            return self._cobj.fileid_reset(*args, **kwargs)

        def lsn_reset(self, *args, **kwargs):
            return self._cobj.lsn_reset(*args, **kwargs)


class DB(MutableMapping):
    def __init__(self, dbenv, *args, **kwargs):
        # give it the proper DBEnv C object that it's expecting
        self._cobj = db.DB(*((dbenv._cobj,) + args), **kwargs)

    # TODO are there other dict methods that need to be overridden?
    def __len__(self):
        return len(self._cobj)
    def __getitem__(self, arg):
        return self._cobj[arg]
    def __setitem__(self, key, value):
        self._cobj[key] = value
    def __delitem__(self, arg):
        del self._cobj[arg]

    if sys.version_info >= (2, 6) :
        def __iter__(self) :
            return self._cobj.__iter__()

    def append(self, *args, **kwargs):
        return self._cobj.append(*args, **kwargs)
    def associate(self, *args, **kwargs):
        return self._cobj.associate(*args, **kwargs)
    def close(self, *args, **kwargs):
        return self._cobj.close(*args, **kwargs)
    def consume(self, *args, **kwargs):
        return self._cobj.consume(*args, **kwargs)
    def consume_wait(self, *args, **kwargs):
        return self._cobj.consume_wait(*args, **kwargs)
    def cursor(self, *args, **kwargs):
        return self._cobj.cursor(*args, **kwargs)
    def delete(self, *args, **kwargs):
        return self._cobj.delete(*args, **kwargs)
    def fd(self, *args, **kwargs):
        return self._cobj.fd(*args, **kwargs)
    def get(self, *args, **kwargs):
        return self._cobj.get(*args, **kwargs)
    def pget(self, *args, **kwargs):
        return self._cobj.pget(*args, **kwargs)
    def get_both(self, *args, **kwargs):
        return self._cobj.get_both(*args, **kwargs)
    def get_byteswapped(self, *args, **kwargs):
        return self._cobj.get_byteswapped(*args, **kwargs)
    def get_size(self, *args, **kwargs):
        return self._cobj.get_size(*args, **kwargs)
    def get_type(self, *args, **kwargs):
        return self._cobj.get_type(*args, **kwargs)
    def join(self, *args, **kwargs):
        return self._cobj.join(*args, **kwargs)
    def key_range(self, *args, **kwargs):
        return self._cobj.key_range(*args, **kwargs)
    def has_key(self, *args, **kwargs):
        return self._cobj.has_key(*args, **kwargs)
    def items(self, *args, **kwargs):
        return self._cobj.items(*args, **kwargs)
    def keys(self, *args, **kwargs):
        return self._cobj.keys(*args, **kwargs)
    def open(self, *args, **kwargs):
        return self._cobj.open(*args, **kwargs)
    def put(self, *args, **kwargs):
        return self._cobj.put(*args, **kwargs)
    def remove(self, *args, **kwargs):
        return self._cobj.remove(*args, **kwargs)
    def rename(self, *args, **kwargs):
        return self._cobj.rename(*args, **kwargs)
    def set_bt_minkey(self, *args, **kwargs):
        return self._cobj.set_bt_minkey(*args, **kwargs)
    def set_bt_compare(self, *args, **kwargs):
        return self._cobj.set_bt_compare(*args, **kwargs)
    def set_cachesize(self, *args, **kwargs):
        return self._cobj.set_cachesize(*args, **kwargs)
    def set_dup_compare(self, *args, **kwargs) :
        return self._cobj.set_dup_compare(*args, **kwargs)
    def set_flags(self, *args, **kwargs):
        return self._cobj.set_flags(*args, **kwargs)
    def set_h_ffactor(self, *args, **kwargs):
        return self._cobj.set_h_ffactor(*args, **kwargs)
    def set_h_nelem(self, *args, **kwargs):
        return self._cobj.set_h_nelem(*args, **kwargs)
    def set_lorder(self, *args, **kwargs):
        return self._cobj.set_lorder(*args, **kwargs)
    def set_pagesize(self, *args, **kwargs):
        return self._cobj.set_pagesize(*args, **kwargs)
    def set_re_delim(self, *args, **kwargs):
        return self._cobj.set_re_delim(*args, **kwargs)
    def set_re_len(self, *args, **kwargs):
        return self._cobj.set_re_len(*args, **kwargs)
    def set_re_pad(self, *args, **kwargs):
        return self._cobj.set_re_pad(*args, **kwargs)
    def set_re_source(self, *args, **kwargs):
        return self._cobj.set_re_source(*args, **kwargs)
    def set_q_extentsize(self, *args, **kwargs):
        return self._cobj.set_q_extentsize(*args, **kwargs)
    def stat(self, *args, **kwargs):
        return self._cobj.stat(*args, **kwargs)
    def sync(self, *args, **kwargs):
        return self._cobj.sync(*args, **kwargs)
    def type(self, *args, **kwargs):
        return self._cobj.type(*args, **kwargs)
    def upgrade(self, *args, **kwargs):
        return self._cobj.upgrade(*args, **kwargs)
    def values(self, *args, **kwargs):
        return self._cobj.values(*args, **kwargs)
    def verify(self, *args, **kwargs):
        return self._cobj.verify(*args, **kwargs)
    def set_get_returns_none(self, *args, **kwargs):
        return self._cobj.set_get_returns_none(*args, **kwargs)

    def set_encrypt(self, *args, **kwargs):
        return self._cobj.set_encrypt(*args, **kwargs)


class DBSequence:
    def __init__(self, *args, **kwargs):
        self._cobj = db.DBSequence(*args, **kwargs)

    def close(self, *args, **kwargs):
        return self._cobj.close(*args, **kwargs)
    def get(self, *args, **kwargs):
        return self._cobj.get(*args, **kwargs)
    def get_dbp(self, *args, **kwargs):
        return self._cobj.get_dbp(*args, **kwargs)
    def get_key(self, *args, **kwargs):
        return self._cobj.get_key(*args, **kwargs)
    def init_value(self, *args, **kwargs):
        return self._cobj.init_value(*args, **kwargs)
    def open(self, *args, **kwargs):
        return self._cobj.open(*args, **kwargs)
    def remove(self, *args, **kwargs):
        return self._cobj.remove(*args, **kwargs)
    def stat(self, *args, **kwargs):
        return self._cobj.stat(*args, **kwargs)
    def set_cachesize(self, *args, **kwargs):
        return self._cobj.set_cachesize(*args, **kwargs)
    def set_flags(self, *args, **kwargs):
        return self._cobj.set_flags(*args, **kwargs)
    def set_range(self, *args, **kwargs):
        return self._cobj.set_range(*args, **kwargs)
    def get_cachesize(self, *args, **kwargs):
        return self._cobj.get_cachesize(*args, **kwargs)
    def get_flags(self, *args, **kwargs):
        return self._cobj.get_flags(*args, **kwargs)
    def get_range(self, *args, **kwargs):
        return self._cobj.get_range(*args, **kwargs)
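Since these wrappers are plain Python classes, they can be subclassed to intercept calls, which the underlying C types do not allow; a hedged sketch (LoggingDB and the file name are hypothetical, not part of the commit):

    # Sketch: subclassing dbobj.DB to log every put (Python 2).
    from bsddb import db, dbobj

    class LoggingDB(dbobj.DB):
        def put(self, *args, **kwargs):
            print 'put:', args[0]                      # log the key
            return dbobj.DB.put(self, *args, **kwargs)

    env = dbobj.DBEnv()
    env.open('.', db.DB_CREATE | db.DB_INIT_MPOOL)
    d = LoggingDB(env)
    d.open('log_demo.db', db.DB_HASH, db.DB_CREATE, 0660)
    d.put('k', 'v')
    d.close()
    env.close()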
Binary file not shown.
190 PythonHome/Lib/bsddb/dbrecio.py Normal file
@@ -0,0 +1,190 @@

"""
File-like objects that read from or write to a bsddb record.

This implements (nearly) all stdio methods.

f = DBRecIO(db, key, txn=None)
f.close()           # explicitly release resources held
flag = f.isatty()   # always false
pos = f.tell()      # get current position
f.seek(pos)         # set current position
f.seek(pos, mode)   # mode 0: absolute; 1: relative; 2: relative to EOF
buf = f.read()      # read until EOF
buf = f.read(n)     # read up to n bytes
f.truncate([size])  # truncate file to at most size (default: current pos)
f.write(buf)        # write at current position
f.writelines(list)  # for line in list: f.write(line)

Notes:
- fileno() is left unimplemented so that code which uses it triggers
  an exception early.
- There's a simple test set (see end of this file) - not yet updated
  for DBRecIO.
- readline() is not implemented yet.


From:
    Itamar Shtull-Trauring <itamar@maxnm.com>
"""

import errno
import string

class DBRecIO:
    def __init__(self, db, key, txn=None):
        self.db = db
        self.key = key
        self.txn = txn
        self.len = None
        self.pos = 0
        self.closed = 0
        self.softspace = 0

    def close(self):
        if not self.closed:
            self.closed = 1
            del self.db, self.txn

    def isatty(self):
        if self.closed:
            raise ValueError, "I/O operation on closed file"
        return 0

    def seek(self, pos, mode = 0):
        if self.closed:
            raise ValueError, "I/O operation on closed file"
        if mode == 1:
            pos = pos + self.pos
        elif mode == 2:
            pos = pos + self.len
        self.pos = max(0, pos)

    def tell(self):
        if self.closed:
            raise ValueError, "I/O operation on closed file"
        return self.pos

    def read(self, n = -1):
        if self.closed:
            raise ValueError, "I/O operation on closed file"
        if n < 0:
            newpos = self.len
        else:
            newpos = min(self.pos+n, self.len)

        dlen = newpos - self.pos

        r = self.db.get(self.key, txn=self.txn, dlen=dlen, doff=self.pos)
        self.pos = newpos
        return r

    __fixme = """
    def readline(self, length=None):
        if self.closed:
            raise ValueError, "I/O operation on closed file"
        if self.buflist:
            self.buf = self.buf + string.joinfields(self.buflist, '')
            self.buflist = []
        i = string.find(self.buf, '\n', self.pos)
        if i < 0:
            newpos = self.len
        else:
            newpos = i+1
        if length is not None:
            if self.pos + length < newpos:
                newpos = self.pos + length
        r = self.buf[self.pos:newpos]
        self.pos = newpos
        return r

    def readlines(self, sizehint = 0):
        total = 0
        lines = []
        line = self.readline()
        while line:
            lines.append(line)
            total += len(line)
            if 0 < sizehint <= total:
                break
            line = self.readline()
        return lines
    """

    def truncate(self, size=None):
        if self.closed:
            raise ValueError, "I/O operation on closed file"
        if size is None:
            size = self.pos
        elif size < 0:
            raise IOError(errno.EINVAL,
                          "Negative size not allowed")
        elif size < self.pos:
            self.pos = size
        self.db.put(self.key, "", txn=self.txn, dlen=self.len-size, doff=size)

    def write(self, s):
        if self.closed:
            raise ValueError, "I/O operation on closed file"
        if not s: return
        if self.pos > self.len:
            self.buflist.append('\0'*(self.pos - self.len))
            self.len = self.pos
        newpos = self.pos + len(s)
        self.db.put(self.key, s, txn=self.txn, dlen=len(s), doff=self.pos)
        self.pos = newpos

    def writelines(self, list):
        self.write(string.joinfields(list, ''))

    def flush(self):
        if self.closed:
            raise ValueError, "I/O operation on closed file"


"""
# A little test suite

def _test():
    import sys
    if sys.argv[1:]:
        file = sys.argv[1]
    else:
        file = '/etc/passwd'
    lines = open(file, 'r').readlines()
    text = open(file, 'r').read()
    f = StringIO()
    for line in lines[:-2]:
        f.write(line)
    f.writelines(lines[-2:])
    if f.getvalue() != text:
        raise RuntimeError, 'write failed'
    length = f.tell()
    print 'File length =', length
    f.seek(len(lines[0]))
    f.write(lines[1])
    f.seek(0)
    print 'First line =', repr(f.readline())
    here = f.tell()
    line = f.readline()
    print 'Second line =', repr(line)
    f.seek(-len(line), 1)
    line2 = f.read(len(line))
    if line != line2:
        raise RuntimeError, 'bad result after seek back'
    f.seek(len(line2), 1)
    list = f.readlines()
    line = list[-1]
    f.seek(f.tell() - len(line))
    line2 = f.read()
    if line != line2:
        raise RuntimeError, 'bad result after seek back from EOF'
    print 'Read', len(list), 'more lines'
    print 'File length =', f.tell()
    if f.tell() != length:
        raise RuntimeError, 'bad length'
    f.close()

if __name__ == '__main__':
    _test()
"""
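DBRecIO exposes one record as a seekable file, but note that it never initialises self.len from the record itself; a hedged sketch that seeds the length by hand (the file name is hypothetical and it is an assumption that this matches the intended calling convention):

    # Sketch: a file-like view over record 1 of a RECNO database (Python 2).
    from bsddb import db, dbrecio

    d = db.DB()
    d.open('rec_demo.db', db.DB_RECNO, db.DB_CREATE, 0660)
    d.append('hello world')              # stored as record number 1

    f = dbrecio.DBRecIO(d, 1)
    f.len = len('hello world')           # work around the unset length
    print f.read(5)                      # 'hello' (partial read via dlen/doff)
    f.close()
    d.close()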
Binary file not shown.
381 PythonHome/Lib/bsddb/dbshelve.py Normal file
@@ -0,0 +1,381 @@
#------------------------------------------------------------------------
#           Copyright (c) 1997-2001 by Total Control Software
#                         All Rights Reserved
#------------------------------------------------------------------------
#
# Module Name:  dbShelve.py
#
# Description:  A reimplementation of the standard shelve.py that
#               forces the use of cPickle, and DB.
#
# Creation Date:    11/3/97 3:39:04PM
#
# License:      This is free software.  You may use this software for any
#               purpose including modification/redistribution, so long as
#               this header remains intact and that you do not claim any
#               rights of ownership or authorship of this software.  This
#               software has been tested, but no warranty is expressed or
#               implied.
#
# 13-Dec-2000:  Updated to be used with the new bsddb3 package.
#               Added DBShelfCursor class.
#
#------------------------------------------------------------------------

"""Manage shelves of pickled objects using bsddb database files for the
storage.
"""

#------------------------------------------------------------------------

import sys
absolute_import = (sys.version_info[0] >= 3)
if absolute_import :
    # Because this syntax is not valid before Python 2.5
    exec("from . import db")
else :
    import db

if sys.version_info[0] >= 3 :
    import cPickle  # Will be converted to "pickle" by "2to3"
else :
    if sys.version_info < (2, 6) :
        import cPickle
    else :
        # When we drop support for python 2.4
        # we could use: (in 2.5 we need a __future__ statement)
        #
        #    with warnings.catch_warnings():
        #        warnings.filterwarnings(...)
        #        ...
        #
        # We can not use "with" as is, because it would be invalid syntax
        # in python 2.4 and (with no __future__) 2.5.
        # Here we simulate "with" following PEP 343 :
        import warnings
        w = warnings.catch_warnings()
        w.__enter__()
        try :
            warnings.filterwarnings('ignore',
                message='the cPickle module has been removed in Python 3.0',
                category=DeprecationWarning)
            import cPickle
        finally :
            w.__exit__()
            del w

HIGHEST_PROTOCOL = cPickle.HIGHEST_PROTOCOL
def _dumps(object, protocol):
    return cPickle.dumps(object, protocol=protocol)

if sys.version_info < (2, 6) :
    from UserDict import DictMixin as MutableMapping
else :
    import collections
    MutableMapping = collections.MutableMapping

#------------------------------------------------------------------------


def open(filename, flags=db.DB_CREATE, mode=0660, filetype=db.DB_HASH,
         dbenv=None, dbname=None):
    """
    A simple factory function for compatibility with the standard
    shelve.py module.  It can be used like this, where key is a string
    and data is a pickleable object:

        from bsddb import dbshelve
        db = dbshelve.open(filename)

        db[key] = data

        db.close()
    """
    if type(flags) == type(''):
        sflag = flags
        if sflag == 'r':
            flags = db.DB_RDONLY
        elif sflag == 'rw':
            flags = 0
        elif sflag == 'w':
            flags = db.DB_CREATE
        elif sflag == 'c':
            flags = db.DB_CREATE
        elif sflag == 'n':
            flags = db.DB_TRUNCATE | db.DB_CREATE
        else:
            raise db.DBError, "flags should be one of 'r', 'w', 'c' or 'n' or use the bsddb.db.DB_* flags"

    d = DBShelf(dbenv)
    d.open(filename, dbname, filetype, flags, mode)
    return d

#---------------------------------------------------------------------------

class DBShelveError(db.DBError): pass


class DBShelf(MutableMapping):
    """A shelf to hold pickled objects, built upon a bsddb DB object.  It
    automatically pickles/unpickles data objects going to/from the DB.
    """
    def __init__(self, dbenv=None):
        self.db = db.DB(dbenv)
        self._closed = True
        if HIGHEST_PROTOCOL:
            self.protocol = HIGHEST_PROTOCOL
        else:
            self.protocol = 1


    def __del__(self):
        self.close()


    def __getattr__(self, name):
        """Many methods we can just pass through to the DB object.
        (See below)
        """
        return getattr(self.db, name)


    #-----------------------------------
    # Dictionary access methods

    def __len__(self):
        return len(self.db)


    def __getitem__(self, key):
        data = self.db[key]
        return cPickle.loads(data)


    def __setitem__(self, key, value):
        data = _dumps(value, self.protocol)
        self.db[key] = data


    def __delitem__(self, key):
        del self.db[key]


    def keys(self, txn=None):
        if txn is not None:
            return self.db.keys(txn)
        else:
            return self.db.keys()

    if sys.version_info >= (2, 6) :
        def __iter__(self) :  # XXX: Load all keys in memory :-(
            for k in self.db.keys() :
                yield k

        # Do this when "DB" supports iteration
        # Or is it enough to pass thru "getattr"?
        #
        # def __iter__(self) :
        #     return self.db.__iter__()


    def open(self, *args, **kwargs):
        self.db.open(*args, **kwargs)
        self._closed = False


    def close(self, *args, **kwargs):
        self.db.close(*args, **kwargs)
        self._closed = True


    def __repr__(self):
        if self._closed:
            return '<DBShelf @ 0x%x - closed>' % (id(self))
        else:
            return repr(dict(self.iteritems()))


    def items(self, txn=None):
        if txn is not None:
            items = self.db.items(txn)
        else:
            items = self.db.items()
        newitems = []

        for k, v in items:
            newitems.append( (k, cPickle.loads(v)) )
        return newitems

    def values(self, txn=None):
        if txn is not None:
            values = self.db.values(txn)
        else:
            values = self.db.values()

        return map(cPickle.loads, values)

    #-----------------------------------
    # Other methods

    def __append(self, value, txn=None):
        data = _dumps(value, self.protocol)
        return self.db.append(data, txn)

    def append(self, value, txn=None):
        if self.get_type() == db.DB_RECNO:
            return self.__append(value, txn=txn)
        raise DBShelveError, "append() only supported when dbshelve opened with filetype=dbshelve.db.DB_RECNO"


    def associate(self, secondaryDB, callback, flags=0):
        def _shelf_callback(priKey, priData, realCallback=callback):
            # Safe in Python 2.x because expression short-circuit
            if sys.version_info[0] < 3 or isinstance(priData, bytes) :
                data = cPickle.loads(priData)
            else :
                data = cPickle.loads(bytes(priData, "iso8859-1"))  # 8 bits

            return realCallback(priKey, data)

        return self.db.associate(secondaryDB, _shelf_callback, flags)


    #def get(self, key, default=None, txn=None, flags=0):
    def get(self, *args, **kw):
        # We do it with *args and **kw so if the default value wasn't
        # given nothing is passed to the extension module.  That way
        # an exception can be raised if set_get_returns_none is turned
        # off.
        data = self.db.get(*args, **kw)
        try:
            return cPickle.loads(data)
        except (EOFError, TypeError, cPickle.UnpicklingError):
            return data  # we may be getting the default value, or None,
                         # so it doesn't need to be unpickled.

    def get_both(self, key, value, txn=None, flags=0):
        data = _dumps(value, self.protocol)
        data = self.db.get(key, data, txn, flags)
        return cPickle.loads(data)


    def cursor(self, txn=None, flags=0):
        c = DBShelfCursor(self.db.cursor(txn, flags))
        c.protocol = self.protocol
        return c


    def put(self, key, value, txn=None, flags=0):
        data = _dumps(value, self.protocol)
        return self.db.put(key, data, txn, flags)


    def join(self, cursorList, flags=0):
        raise NotImplementedError


    #----------------------------------------------
    # Methods allowed to pass-through to self.db
    #
    #    close, delete, fd, get_byteswapped, get_type, has_key,
    #    key_range, open, remove, rename, stat, sync,
    #    upgrade, verify, and all set_* methods.


#---------------------------------------------------------------------------

class DBShelfCursor:
    """
    """
    def __init__(self, cursor):
        self.dbc = cursor

    def __del__(self):
        self.close()


    def __getattr__(self, name):
        """Some methods we can just pass through to the cursor object.  (See below)"""
        return getattr(self.dbc, name)


    #----------------------------------------------

    def dup(self, flags=0):
        c = DBShelfCursor(self.dbc.dup(flags))
        c.protocol = self.protocol
        return c


    def put(self, key, value, flags=0):
        data = _dumps(value, self.protocol)
        return self.dbc.put(key, data, flags)


    def get(self, *args):
        count = len(args)  # a method overloading hack
        method = getattr(self, 'get_%d' % count)
        method(*args)

    def get_1(self, flags):
        rec = self.dbc.get(flags)
        return self._extract(rec)

    def get_2(self, key, flags):
        rec = self.dbc.get(key, flags)
        return self._extract(rec)

    def get_3(self, key, value, flags):
        data = _dumps(value, self.protocol)
        rec = self.dbc.get(key, flags)
        return self._extract(rec)


    def current(self, flags=0): return self.get_1(flags|db.DB_CURRENT)
    def first(self, flags=0): return self.get_1(flags|db.DB_FIRST)
    def last(self, flags=0): return self.get_1(flags|db.DB_LAST)
    def next(self, flags=0): return self.get_1(flags|db.DB_NEXT)
    def prev(self, flags=0): return self.get_1(flags|db.DB_PREV)
    def consume(self, flags=0): return self.get_1(flags|db.DB_CONSUME)
    def next_dup(self, flags=0): return self.get_1(flags|db.DB_NEXT_DUP)
    def next_nodup(self, flags=0): return self.get_1(flags|db.DB_NEXT_NODUP)
    def prev_nodup(self, flags=0): return self.get_1(flags|db.DB_PREV_NODUP)


    def get_both(self, key, value, flags=0):
        data = _dumps(value, self.protocol)
        rec = self.dbc.get_both(key, flags)
        return self._extract(rec)


    def set(self, key, flags=0):
        rec = self.dbc.set(key, flags)
        return self._extract(rec)

    def set_range(self, key, flags=0):
        rec = self.dbc.set_range(key, flags)
        return self._extract(rec)

    def set_recno(self, recno, flags=0):
        rec = self.dbc.set_recno(recno, flags)
        return self._extract(rec)

    set_both = get_both

    def _extract(self, rec):
        if rec is None:
            return None
        else:
            key, data = rec
            # Safe in Python 2.x because expression short-circuit
            if sys.version_info[0] < 3 or isinstance(data, bytes) :
                return key, cPickle.loads(data)
            else :
                return key, cPickle.loads(bytes(data, "iso8859-1"))  # 8 bits

    #----------------------------------------------
    # Methods allowed to pass-through to self.dbc
    #
    # close, count, delete, get_recno, join_item


#---------------------------------------------------------------------------
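Beyond the dict-style access shown in open()'s docstring, the shelf pickles arbitrary objects and hands back typed cursors; a short hedged sketch ('shelf_demo.db' is a hypothetical file name):

    # Sketch: structured values and cursor traversal with dbshelve (Python 2).
    from bsddb import dbshelve

    s = dbshelve.open('shelf_demo.db', 'c')
    s['config'] = {'retries': 3, 'hosts': ['a', 'b']}   # pickled transparently
    print s['config']['retries']                        # 3

    c = s.cursor()
    print c.first()   # ('config', {...}) - values come back unpickled
    c.close()
    s.close()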
Binary file not shown.
843 PythonHome/Lib/bsddb/dbtables.py Normal file
@@ -0,0 +1,843 @@
|
||||
#-----------------------------------------------------------------------
|
||||
#
|
||||
# Copyright (C) 2000, 2001 by Autonomous Zone Industries
|
||||
# Copyright (C) 2002 Gregory P. Smith
|
||||
#
|
||||
# License: This is free software. You may use this software for any
|
||||
# purpose including modification/redistribution, so long as
|
||||
# this header remains intact and that you do not claim any
|
||||
# rights of ownership or authorship of this software. This
|
||||
# software has been tested, but no warranty is expressed or
|
||||
# implied.
|
||||
#
|
||||
# -- Gregory P. Smith <greg@krypto.org>
|
||||
|
||||
# This provides a simple database table interface built on top of
|
||||
# the Python Berkeley DB 3 interface.
|
||||
#
|
||||
_cvsid = '$Id$'
|
||||
|
||||
import re
|
||||
import sys
|
||||
import copy
|
||||
import random
|
||||
import struct
|
||||
|
||||
|
||||
if sys.version_info[0] >= 3 :
|
||||
import pickle
|
||||
else :
|
||||
if sys.version_info < (2, 6) :
|
||||
import cPickle as pickle
|
||||
else :
|
||||
# When we drop support for python 2.4
|
||||
# we could use: (in 2.5 we need a __future__ statement)
|
||||
#
|
||||
# with warnings.catch_warnings():
|
||||
# warnings.filterwarnings(...)
|
||||
# ...
|
||||
#
|
||||
# We can not use "with" as is, because it would be invalid syntax
|
||||
# in python 2.4 and (with no __future__) 2.5.
|
||||
# Here we simulate "with" following PEP 343 :
|
||||
import warnings
|
||||
w = warnings.catch_warnings()
|
||||
w.__enter__()
|
||||
try :
|
||||
warnings.filterwarnings('ignore',
|
||||
message='the cPickle module has been removed in Python 3.0',
|
||||
category=DeprecationWarning)
|
||||
import cPickle as pickle
|
||||
finally :
|
||||
w.__exit__()
|
||||
del w
|
||||
|
||||
try:
|
||||
# For Pythons w/distutils pybsddb
|
||||
from bsddb3 import db
|
||||
except ImportError:
|
||||
# For Python 2.3
|
||||
from bsddb import db
|
||||
|
||||
class TableDBError(StandardError):
|
||||
pass
|
||||
class TableAlreadyExists(TableDBError):
|
||||
pass
|
||||
|
||||
|
||||
class Cond:
|
||||
"""This condition matches everything"""
|
||||
def __call__(self, s):
|
||||
return 1
|
||||
|
||||
class ExactCond(Cond):
|
||||
"""Acts as an exact match condition function"""
|
||||
def __init__(self, strtomatch):
|
||||
self.strtomatch = strtomatch
|
||||
def __call__(self, s):
|
||||
return s == self.strtomatch
|
||||
|
||||
class PrefixCond(Cond):
|
||||
"""Acts as a condition function for matching a string prefix"""
|
||||
def __init__(self, prefix):
|
||||
self.prefix = prefix
|
||||
def __call__(self, s):
|
||||
return s[:len(self.prefix)] == self.prefix
|
||||
|
||||
class PostfixCond(Cond):
|
||||
"""Acts as a condition function for matching a string postfix"""
|
||||
def __init__(self, postfix):
|
||||
self.postfix = postfix
|
||||
def __call__(self, s):
|
||||
return s[-len(self.postfix):] == self.postfix
|
||||
|
||||
class LikeCond(Cond):
|
||||
"""
|
||||
Acts as a function that will match using an SQL 'LIKE' style
|
||||
string. Case insensitive and % signs are wild cards.
|
||||
This isn't perfect but it should work for the simple common cases.
|
||||
"""
|
||||
def __init__(self, likestr, re_flags=re.IGNORECASE):
|
||||
# escape python re characters
|
||||
chars_to_escape = '.*+()[]?'
|
||||
for char in chars_to_escape :
|
||||
likestr = likestr.replace(char, '\\'+char)
|
||||
# convert %s to wildcards
|
||||
self.likestr = likestr.replace('%', '.*')
|
||||
self.re = re.compile('^'+self.likestr+'$', re_flags)
|
||||
def __call__(self, s):
|
||||
return self.re.match(s)
|
||||
|
||||
#
|
||||
# keys used to store database metadata
|
||||
#
|
||||
_table_names_key = '__TABLE_NAMES__' # list of the tables in this db
|
||||
_columns = '._COLUMNS__' # table_name+this key contains a list of columns
|
||||
|
||||
def _columns_key(table):
|
||||
return table + _columns
|
||||
|
||||
#
|
||||
# these keys are found within table sub databases
|
||||
#
|
||||
_data = '._DATA_.' # this+column+this+rowid key contains table data
|
||||
_rowid = '._ROWID_.' # this+rowid+this key contains a unique entry for each
|
||||
# row in the table. (no data is stored)
|
||||
_rowid_str_len = 8 # length in bytes of the unique rowid strings
|
||||
|
||||
|
||||
def _data_key(table, col, rowid):
|
||||
return table + _data + col + _data + rowid
|
||||
|
||||
def _search_col_data_key(table, col):
|
||||
return table + _data + col + _data
|
||||
|
||||
def _search_all_data_key(table):
|
||||
return table + _data
|
||||
|
||||
def _rowid_key(table, rowid):
|
||||
return table + _rowid + rowid + _rowid
|
||||
|
||||
def _search_rowid_key(table):
|
||||
return table + _rowid
|
||||
|
||||
def contains_metastrings(s) :
|
||||
"""Verify that the given string does not contain any
|
||||
metadata strings that might interfere with dbtables database operation.
|
||||
"""
|
||||
if (s.find(_table_names_key) >= 0 or
|
||||
s.find(_columns) >= 0 or
|
||||
s.find(_data) >= 0 or
|
||||
s.find(_rowid) >= 0):
|
||||
# Then
|
||||
return 1
|
||||
else:
|
||||
return 0
|
||||
|
||||
|
||||
class bsdTableDB :
|
||||
def __init__(self, filename, dbhome, create=0, truncate=0, mode=0600,
|
||||
recover=0, dbflags=0):
|
||||
"""bsdTableDB(filename, dbhome, create=0, truncate=0, mode=0600)
|
||||
|
||||
Open database name in the dbhome Berkeley DB directory.
|
||||
Use keyword arguments when calling this constructor.
|
||||
"""
|
||||
self.db = None
|
||||
myflags = db.DB_THREAD
|
||||
if create:
|
||||
myflags |= db.DB_CREATE
|
||||
flagsforenv = (db.DB_INIT_MPOOL | db.DB_INIT_LOCK | db.DB_INIT_LOG |
|
||||
db.DB_INIT_TXN | dbflags)
|
||||
# DB_AUTO_COMMIT isn't a valid flag for env.open()
|
||||
try:
|
||||
dbflags |= db.DB_AUTO_COMMIT
|
||||
except AttributeError:
|
||||
pass
|
||||
if recover:
|
||||
flagsforenv = flagsforenv | db.DB_RECOVER
|
||||
self.env = db.DBEnv()
|
||||
# enable auto deadlock avoidance
|
||||
self.env.set_lk_detect(db.DB_LOCK_DEFAULT)
|
||||
self.env.open(dbhome, myflags | flagsforenv)
|
||||
if truncate:
|
||||
myflags |= db.DB_TRUNCATE
|
||||
self.db = db.DB(self.env)
|
||||
# this code relies on DBCursor.set* methods to raise exceptions
|
||||
# rather than returning None
|
||||
self.db.set_get_returns_none(1)
|
||||
# allow duplicate entries [warning: be careful w/ metadata]
|
||||
self.db.set_flags(db.DB_DUP)
|
||||
self.db.open(filename, db.DB_BTREE, dbflags | myflags, mode)
|
||||
self.dbfilename = filename
|
||||
|
        if sys.version_info[0] >= 3 :
            class cursor_py3k(object) :
                def __init__(self, dbcursor) :
                    self._dbcursor = dbcursor

                def close(self) :
                    return self._dbcursor.close()

                def set_range(self, search) :
                    v = self._dbcursor.set_range(bytes(search, "iso8859-1"))
                    if v is not None :
                        v = (v[0].decode("iso8859-1"),
                             v[1].decode("iso8859-1"))
                    return v

                def __next__(self) :
                    v = getattr(self._dbcursor, "next")()
                    if v is not None :
                        v = (v[0].decode("iso8859-1"),
                             v[1].decode("iso8859-1"))
                    return v

            class db_py3k(object) :
                def __init__(self, db) :
                    self._db = db

                def cursor(self, txn=None) :
                    return cursor_py3k(self._db.cursor(txn=txn))

                def has_key(self, key, txn=None) :
                    return getattr(self._db, "has_key")(bytes(key, "iso8859-1"),
                            txn=txn)

                def put(self, key, value, flags=0, txn=None) :
                    key = bytes(key, "iso8859-1")
                    if value is not None :
                        value = bytes(value, "iso8859-1")
                    return self._db.put(key, value, flags=flags, txn=txn)

                def put_bytes(self, key, value, txn=None) :
                    key = bytes(key, "iso8859-1")
                    return self._db.put(key, value, txn=txn)

                def get(self, key, txn=None, flags=0) :
                    key = bytes(key, "iso8859-1")
                    v = self._db.get(key, txn=txn, flags=flags)
                    if v is not None :
                        v = v.decode("iso8859-1")
                    return v

                def get_bytes(self, key, txn=None, flags=0) :
                    key = bytes(key, "iso8859-1")
                    return self._db.get(key, txn=txn, flags=flags)

                def delete(self, key, txn=None) :
                    key = bytes(key, "iso8859-1")
                    return self._db.delete(key, txn=txn)

                def close(self) :
                    return self._db.close()

            self.db = db_py3k(self.db)
        else :  # Python 2.x
            pass

        # Initialize the table names list if this is a new database
        txn = self.env.txn_begin()
        try:
            if not getattr(self.db, "has_key")(_table_names_key, txn):
                getattr(self.db, "put_bytes", self.db.put) \
                    (_table_names_key, pickle.dumps([], 1), txn=txn)
        # Yes, bare except
        except:
            txn.abort()
            raise
        else:
            txn.commit()
        # TODO verify more of the database's metadata?
        self.__tablecolumns = {}

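    # Example (not part of the original module; paths hypothetical).
    # Berkeley DB expects the dbhome directory to already exist; create=1
    # adds DB_CREATE so a missing tables.db file is created on open:
    #
    #   from bsddb import dbtables
    #   tdb = dbtables.bsdTableDB(filename='tables.db',
    #                             dbhome='/tmp/dbhome', create=1)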
    def __del__(self):
        self.close()

    def close(self):
        if self.db is not None:
            self.db.close()
            self.db = None
        if self.env is not None:
            self.env.close()
            self.env = None

    def checkpoint(self, mins=0):
        self.env.txn_checkpoint(mins)

    def sync(self):
        self.db.sync()

    def _db_print(self) :
        """Print the database to stdout for debugging"""
        print "******** Printing raw database for debugging ********"
        cur = self.db.cursor()
        try:
            key, data = cur.first()
            while 1:
                print repr({key: data})
                next = cur.next()
                if next:
                    key, data = next
                else:
                    cur.close()
                    return
        except db.DBNotFoundError:
            cur.close()


    def CreateTable(self, table, columns):
        """CreateTable(table, columns) - Create a new table in the database.

        raises TableDBError if it already exists or for other DB errors.
        """
        assert isinstance(columns, list)

        txn = None
        try:
            # checking sanity of the table and column names here on
            # table creation will prevent problems elsewhere.
            if contains_metastrings(table):
                raise ValueError(
                    "bad table name: contains reserved metastrings")
            for column in columns :
                if contains_metastrings(column):
                    raise ValueError(
                        "bad column name: contains reserved metastrings")

            columnlist_key = _columns_key(table)
            if getattr(self.db, "has_key")(columnlist_key):
                raise TableAlreadyExists, "table already exists"

            txn = self.env.txn_begin()
            # store the table's column info
            getattr(self.db, "put_bytes", self.db.put)(columnlist_key,
                    pickle.dumps(columns, 1), txn=txn)

            # add the table name to the tablelist
            tablelist = pickle.loads(getattr(self.db, "get_bytes",
                    self.db.get) (_table_names_key, txn=txn, flags=db.DB_RMW))
            tablelist.append(table)
            # delete 1st, in case we opened with DB_DUP
            self.db.delete(_table_names_key, txn=txn)
            getattr(self.db, "put_bytes", self.db.put)(_table_names_key,
                    pickle.dumps(tablelist, 1), txn=txn)

            txn.commit()
            txn = None
        except db.DBError, dberror:
            if txn:
                txn.abort()
            if sys.version_info < (2, 6) :
                raise TableDBError, dberror[1]
            else :
                raise TableDBError, dberror.args[1]

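    # Example (not part of the original module; table and column names are
    # hypothetical). Values are stored as raw strings, so callers serialize
    # anything non-string themselves:
    #
    #   tdb.CreateTable('frogs', ['frogNumber', 'frogName'])
    #   tdb.Insert('frogs', {'frogNumber': '1', 'frogName': 'Kermit'})
    #   tdb.Select('frogs', ['frogName'],
    #              conditions={'frogNumber': lambda x: x == '1'})
    #       -> [{'frogName': 'Kermit'}]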

    def ListTableColumns(self, table):
        """Return a list of columns in the given table.
        [] if the table doesn't exist.
        """
        assert isinstance(table, str)
        if contains_metastrings(table):
            raise ValueError, "bad table name: contains reserved metastrings"

        columnlist_key = _columns_key(table)
        if not getattr(self.db, "has_key")(columnlist_key):
            return []
        pickledcolumnlist = getattr(self.db, "get_bytes",
                self.db.get)(columnlist_key)
        if pickledcolumnlist:
            return pickle.loads(pickledcolumnlist)
        else:
            return []

    def ListTables(self):
        """Return a list of tables in this database."""
        pickledtablelist = getattr(self.db, "get_bytes",
                self.db.get)(_table_names_key)
        if pickledtablelist:
            return pickle.loads(pickledtablelist)
        else:
            return []

    def CreateOrExtendTable(self, table, columns):
        """CreateOrExtendTable(table, columns)

        Create a new table in the database.

        If a table of this name already exists, extend it to have any
        additional columns present in the given list as well as
        all of its current columns.
        """
        assert isinstance(columns, list)

        try:
            self.CreateTable(table, columns)
        except TableAlreadyExists:
            # the table already existed, add any new columns
            txn = None
            try:
                columnlist_key = _columns_key(table)
                txn = self.env.txn_begin()

                # load the current column list
                oldcolumnlist = pickle.loads(
                    getattr(self.db, "get_bytes",
                        self.db.get)(columnlist_key, txn=txn, flags=db.DB_RMW))
                # create a hash table for fast lookups of column names in the
                # loop below
                oldcolumnhash = {}
                for c in oldcolumnlist:
                    oldcolumnhash[c] = c

                # create a new column list containing both the old and new
                # column names
                newcolumnlist = copy.copy(oldcolumnlist)
                for c in columns:
                    if not c in oldcolumnhash:
                        newcolumnlist.append(c)

                # store the table's new extended column list
                if newcolumnlist != oldcolumnlist :
                    # delete the old one first since we opened with DB_DUP
                    self.db.delete(columnlist_key, txn=txn)
                    getattr(self.db, "put_bytes", self.db.put)(columnlist_key,
                                pickle.dumps(newcolumnlist, 1),
                                txn=txn)

                txn.commit()
                txn = None

                self.__load_column_info(table)
            except db.DBError, dberror:
                if txn:
                    txn.abort()
                if sys.version_info < (2, 6) :
                    raise TableDBError, dberror[1]
                else :
                    raise TableDBError, dberror.args[1]


    def __load_column_info(self, table) :
        """initialize the self.__tablecolumns dict"""
        # check the column names
        try:
            tcolpickles = getattr(self.db, "get_bytes",
                    self.db.get)(_columns_key(table))
        except db.DBNotFoundError:
            raise TableDBError, "unknown table: %r" % (table,)
        if not tcolpickles:
            raise TableDBError, "unknown table: %r" % (table,)
        self.__tablecolumns[table] = pickle.loads(tcolpickles)

    def __new_rowid(self, table, txn) :
        """Create a new unique row identifier"""
        unique = 0
        while not unique:
            # Generate a random 64-bit row ID string
            # (note: might have <64 bits of true randomness
            # but it's plenty for our database id needs!)
            blist = []
            for x in xrange(_rowid_str_len):
                blist.append(random.randint(0, 255))
            newid = struct.pack('B'*_rowid_str_len, *blist)

            if sys.version_info[0] >= 3 :
                newid = newid.decode("iso8859-1")  # 8 bits

            # Guarantee uniqueness by adding this key to the database
            try:
                self.db.put(_rowid_key(table, newid), None, txn=txn,
                        flags=db.DB_NOOVERWRITE)
            except db.DBKeyExistError:
                pass
            else:
                unique = 1

        return newid
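    # A minimal standalone sketch of the rowid scheme above (not part of the
    # original module). Eight random bytes are packed into a string, and the
    # DB_NOOVERWRITE flag turns put() into an atomic uniqueness check; this
    # sketch fakes that check with an in-memory set:
    #
    #   import random, struct
    #
    #   def new_rowid(seen):
    #       while True:
    #           blist = [random.randint(0, 255) for x in xrange(8)]
    #           newid = struct.pack('B' * 8, *blist)
    #           if newid not in seen:   # db.put(..., DB_NOOVERWRITE) for real
    #               seen.add(newid)
    #               return newid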


    def Insert(self, table, rowdict) :
        """Insert(table, rowdict) - Insert a new row into the table
        using the keys+values from rowdict as the column values.
        """

        txn = None
        try:
            if not getattr(self.db, "has_key")(_columns_key(table)):
                raise TableDBError, "unknown table"

            # check the validity of each column name
            if not table in self.__tablecolumns:
                self.__load_column_info(table)
            for column in rowdict.keys() :
                if not self.__tablecolumns[table].count(column):
                    raise TableDBError, "unknown column: %r" % (column,)

            # get a unique row identifier for this row
            txn = self.env.txn_begin()
            rowid = self.__new_rowid(table, txn=txn)

            # insert the row values into the table database
            for column, dataitem in rowdict.items():
                # store the value
                self.db.put(_data_key(table, column, rowid), dataitem, txn=txn)

            txn.commit()
            txn = None

        except db.DBError, dberror:
            # WIBNI we could just abort the txn and re-raise the exception?
            # But no, because TableDBError is not related to DBError via
            # inheritance, so it would be backwards incompatible.  Do the next
            # best thing.
            info = sys.exc_info()
            if txn:
                txn.abort()
                self.db.delete(_rowid_key(table, rowid))
            if sys.version_info < (2, 6) :
                raise TableDBError, dberror[1], info[2]
            else :
                raise TableDBError, dberror.args[1], info[2]


    def Modify(self, table, conditions={}, mappings={}):
        """Modify(table, conditions={}, mappings={}) - Modify items in
        rows matching 'conditions' using mapping functions in 'mappings'

        * table - the table name
        * conditions - a dictionary keyed on column names containing
          a condition callable expecting the data string as an
          argument and returning a boolean.
        * mappings - a dictionary keyed on column names containing a
          mapping callable expecting the data string as an argument and
          returning the new string for that column.
        """

        try:
            matching_rowids = self.__Select(table, [], conditions)

            # modify only requested columns
            columns = mappings.keys()
            for rowid in matching_rowids.keys():
                txn = None
                try:
                    for column in columns:
                        txn = self.env.txn_begin()
                        # modify the requested column
                        try:
                            dataitem = self.db.get(
                                _data_key(table, column, rowid),
                                txn=txn)
                            self.db.delete(
                                _data_key(table, column, rowid),
                                txn=txn)
                        except db.DBNotFoundError:
                            # XXXXXXX row key somehow didn't exist, assume no
                            # error
                            dataitem = None
                        dataitem = mappings[column](dataitem)
                        if dataitem is not None:
                            self.db.put(
                                _data_key(table, column, rowid),
                                dataitem, txn=txn)
                        txn.commit()
                        txn = None

                # catch all exceptions here since we call unknown callables
                except:
                    if txn:
                        txn.abort()
                    raise

        except db.DBError, dberror:
            if sys.version_info < (2, 6) :
                raise TableDBError, dberror[1]
            else :
                raise TableDBError, dberror.args[1]
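    # Example (not part of the original module; names hypothetical). Rename
    # every frog called 'Kermit':
    #
    #   tdb.Modify('frogs',
    #              conditions={'frogName': lambda s: s == 'Kermit'},
    #              mappings={'frogName': lambda s: 'Kermit the Frog'})
    #
    # A mapping callable may also return None to drop that column's value
    # for the matched row: the old value is deleted before the mapping runs,
    # and only a non-None result is re-put.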

    def Delete(self, table, conditions={}):
        """Delete(table, conditions) - Delete items matching the given
        conditions from the table.

        * conditions - a dictionary keyed on column names containing
          condition functions expecting the data string as an
          argument and returning a boolean.
        """

        try:
            matching_rowids = self.__Select(table, [], conditions)

            # delete row data from all columns
            columns = self.__tablecolumns[table]
            for rowid in matching_rowids.keys():
                txn = None
                try:
                    txn = self.env.txn_begin()
                    for column in columns:
                        # delete the data key
                        try:
                            self.db.delete(_data_key(table, column, rowid),
                                    txn=txn)
                        except db.DBNotFoundError:
                            # XXXXXXX column may not exist, assume no error
                            pass

                    try:
                        self.db.delete(_rowid_key(table, rowid), txn=txn)
                    except db.DBNotFoundError:
                        # XXXXXXX row key somehow didn't exist, assume no error
                        pass
                    txn.commit()
                    txn = None
                except db.DBError, dberror:
                    if txn:
                        txn.abort()
                    raise
        except db.DBError, dberror:
            if sys.version_info < (2, 6) :
                raise TableDBError, dberror[1]
            else :
                raise TableDBError, dberror.args[1]


    def Select(self, table, columns, conditions={}):
        """Select(table, columns, conditions) - retrieve specific row data
        Returns a list of row column->value mapping dictionaries.

        * columns - a list of which column data to return.  If
          columns is None, all columns will be returned.
        * conditions - a dictionary keyed on column names
          containing callable conditions expecting the data string as an
          argument and returning a boolean.
        """
        try:
            if not table in self.__tablecolumns:
                self.__load_column_info(table)
            if columns is None:
                columns = self.__tablecolumns[table]
            matching_rowids = self.__Select(table, columns, conditions)
        except db.DBError, dberror:
            if sys.version_info < (2, 6) :
                raise TableDBError, dberror[1]
            else :
                raise TableDBError, dberror.args[1]
        # return the matches as a list of dictionaries
        return matching_rowids.values()
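    # Example (not part of the original module; names hypothetical).
    # Conditions are plain callables, so lambdas work, as do the
    # ExactCond/PrefixCond/LikeCond helper classes this module defines
    # earlier and special-cases in cmp_conditions() below:
    #
    #   tdb.Select('frogs', None,
    #              conditions={'frogName': lambda s: s.startswith('Ker')})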

    def __Select(self, table, columns, conditions):
        """__Select() - Used to implement Select and Delete (above)
        Returns a dictionary keyed on rowids containing dicts
        holding the row data for columns listed in the columns param
        that match the given conditions.
        * conditions is a dictionary keyed on column names
          containing callable conditions expecting the data string as an
          argument and returning a boolean.
        """
        # check the validity of each column name
        if not table in self.__tablecolumns:
            self.__load_column_info(table)
        if columns is None:
            columns = self.__tablecolumns[table]
        for column in (columns + conditions.keys()):
            if not self.__tablecolumns[table].count(column):
                raise TableDBError, "unknown column: %r" % (column,)

        # keyed on rows that match so far, containing dicts keyed on
        # column names containing the data for that row and column.
        matching_rowids = {}
        # keys are rowids that do not match
        rejected_rowids = {}

        # attempt to sort the conditions in such a way as to minimize full
        # column lookups
        def cmp_conditions(atuple, btuple):
            a = atuple[1]
            b = btuple[1]
            if type(a) is type(b):

                # Needed for python 3. "cmp" vanished in 3.0.1
                def cmp(a, b) :
                    if a == b : return 0
                    if a < b : return -1
                    return 1

                if isinstance(a, PrefixCond) and isinstance(b, PrefixCond):
                    # longest prefix first
                    return cmp(len(b.prefix), len(a.prefix))
                if isinstance(a, LikeCond) and isinstance(b, LikeCond):
                    # longest likestr first
                    return cmp(len(b.likestr), len(a.likestr))
                return 0
            if isinstance(a, ExactCond):
                return -1
            if isinstance(b, ExactCond):
                return 1
            if isinstance(a, PrefixCond):
                return -1
            if isinstance(b, PrefixCond):
                return 1
            # leave all unknown condition callables alone as equals
            return 0

        if sys.version_info < (2, 6) :
            conditionlist = conditions.items()
            conditionlist.sort(cmp_conditions)
        else :  # Insertion Sort. Please, improve
            conditionlist = []
            for i in conditions.items() :
                for j, k in enumerate(conditionlist) :
                    r = cmp_conditions(k, i)
                    if r == 1 :
                        conditionlist.insert(j, i)
                        break
                else :
                    conditionlist.append(i)

        # Apply conditions to column data to find what we want
        cur = self.db.cursor()
        column_num = -1
        for column, condition in conditionlist:
            column_num = column_num + 1
            searchkey = _search_col_data_key(table, column)
            # speedup: don't linear search columns within loop
            if column in columns:
                savethiscolumndata = 1  # save the data for return
            else:
                savethiscolumndata = 0  # data only used for selection

            try:
                key, data = cur.set_range(searchkey)
                while key[:len(searchkey)] == searchkey:
                    # extract the rowid from the key
                    rowid = key[-_rowid_str_len:]

                    if not rowid in rejected_rowids:
                        # if no condition was specified or the condition
                        # succeeds, add row to our match list.
                        if not condition or condition(data):
                            if not rowid in matching_rowids:
                                matching_rowids[rowid] = {}
                            if savethiscolumndata:
                                matching_rowids[rowid][column] = data
                        else:
                            if rowid in matching_rowids:
                                del matching_rowids[rowid]
                            rejected_rowids[rowid] = rowid

                    key, data = cur.next()

            except db.DBError, dberror:
                if dberror.args[0] != db.DB_NOTFOUND:
                    raise
                continue

        cur.close()

        # we're done selecting rows, garbage collect the reject list
        del rejected_rowids

        # extract any remaining desired column data from the
        # database for the matching rows.
        if len(columns) > 0:
            for rowid, rowdata in matching_rowids.items():
                for column in columns:
                    if column in rowdata:
                        continue
                    try:
                        rowdata[column] = self.db.get(
                            _data_key(table, column, rowid))
                    except db.DBError, dberror:
                        if sys.version_info < (2, 6) :
                            if dberror[0] != db.DB_NOTFOUND:
                                raise
                        else :
                            if dberror.args[0] != db.DB_NOTFOUND:
                                raise
                        rowdata[column] = None

        # return the matches
        return matching_rowids


    def Drop(self, table):
        """Remove an entire table from the database"""
        txn = None
        try:
            txn = self.env.txn_begin()

            # delete the column list
            self.db.delete(_columns_key(table), txn=txn)

            cur = self.db.cursor(txn)

            # delete all keys containing this table's column and row info
            table_key = _search_all_data_key(table)
            while 1:
                try:
                    key, data = cur.set_range(table_key)
                except db.DBNotFoundError:
                    break
                # only delete items in this table
                if key[:len(table_key)] != table_key:
                    break
                cur.delete()

            # delete all rowids used by this table
            table_key = _search_rowid_key(table)
            while 1:
                try:
                    key, data = cur.set_range(table_key)
                except db.DBNotFoundError:
                    break
                # only delete items in this table
                if key[:len(table_key)] != table_key:
                    break
                cur.delete()

            cur.close()

            # delete the tablename from the table name list
            tablelist = pickle.loads(
                getattr(self.db, "get_bytes", self.db.get)(_table_names_key,
                    txn=txn, flags=db.DB_RMW))
            try:
                tablelist.remove(table)
            except ValueError:
                # hmm, it wasn't there, oh well, that's what we want.
                pass
            # delete 1st, in case we opened with DB_DUP
            self.db.delete(_table_names_key, txn=txn)
            getattr(self.db, "put_bytes", self.db.put)(_table_names_key,
                    pickle.dumps(tablelist, 1), txn=txn)

            txn.commit()
            txn = None

            if table in self.__tablecolumns:
                del self.__tablecolumns[table]

        except db.DBError, dberror:
            if txn:
                txn.abort()
            raise TableDBError(dberror.args[1])
Binary file not shown.
83 PythonHome/Lib/bsddb/dbutils.py Normal file
@ -0,0 +1,83 @@
#------------------------------------------------------------------------
#
# Copyright (C) 2000 Autonomous Zone Industries
#
# License:      This is free software.  You may use this software for any
#               purpose including modification/redistribution, so long as
#               this header remains intact and that you do not claim any
#               rights of ownership or authorship of this software.  This
#               software has been tested, but no warranty is expressed or
#               implied.
#
# Author: Gregory P. Smith <greg@krypto.org>
#
# Note: I don't know how useful this is in reality since when a
#       DBLockDeadlockError happens the current transaction is supposed to be
#       aborted.  If it doesn't then when the operation is attempted again
#       the deadlock is still happening...
#       --Robin
#
#------------------------------------------------------------------------


#
# import the time.sleep function in a namespace safe way to allow
# "from bsddb.dbutils import *"
#
from time import sleep as _sleep

import sys
absolute_import = (sys.version_info[0] >= 3)
if absolute_import :
    # Because this syntax is not valid before Python 2.5
    exec("from . import db")
else :
    import db

# always sleep at least N seconds between retries
_deadlock_MinSleepTime = 1.0/128
# never sleep more than N seconds between retries
_deadlock_MaxSleepTime = 3.14159

# Assign a file object to this for a "sleeping" message to be written to it
# each retry
_deadlock_VerboseFile = None


def DeadlockWrap(function, *_args, **_kwargs):
    """DeadlockWrap(function, *_args, **_kwargs) - automatically retries
    function in case of a database deadlock.

    This is a function intended to be used to wrap database calls such
    that they perform retries with exponentially backing off sleeps in
    between when a DBLockDeadlockError exception is raised.

    A 'max_retries' parameter may optionally be passed to prevent it
    from retrying forever (in which case the exception will be reraised).

        d = DB(...)
        d.open(...)
        DeadlockWrap(d.put, "foo", data="bar")  # set key "foo" to "bar"
    """
    sleeptime = _deadlock_MinSleepTime
    max_retries = _kwargs.get('max_retries', -1)
    if 'max_retries' in _kwargs:
        del _kwargs['max_retries']
    while True:
        try:
            return function(*_args, **_kwargs)
        except db.DBLockDeadlockError:
            if _deadlock_VerboseFile:
                _deadlock_VerboseFile.write(
                    'dbutils.DeadlockWrap: sleeping %1.3f\n' % sleeptime)
            _sleep(sleeptime)
            # exponential backoff in the sleep time
            sleeptime *= 2
            if sleeptime > _deadlock_MaxSleepTime:
                sleeptime = _deadlock_MaxSleepTime
            max_retries -= 1
            if max_retries == -1:
                raise


#------------------------------------------------------------------------
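# Example (not part of the original module; the database file is
# hypothetical). max_retries=N gives the call N retries before the
# deadlock error is re-raised; omitting it retries forever, because the
# counter starts at -1 and is decremented past the sentinel check:
#
#   from bsddb import db, dbutils
#   d = db.DB()
#   d.open('example.db', dbtype=db.DB_HASH, flags=db.DB_CREATE)
#   dbutils.DeadlockWrap(d.put, 'foo', 'bar', max_retries=5)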
Binary file not shown.
199 PythonHome/Lib/cProfile.py Normal file
@ -0,0 +1,199 @@
#! /usr/bin/env python

"""Python interface for the 'lsprof' profiler.
Compatible with the 'profile' module.
"""

__all__ = ["run", "runctx", "help", "Profile"]

import _lsprof

# ____________________________________________________________
# Simple interface

def run(statement, filename=None, sort=-1):
    """Run statement under profiler optionally saving results in filename

    This function takes a single argument that can be passed to the
    "exec" statement, and an optional file name.  In all cases this
    routine attempts to "exec" its first argument and gather profiling
    statistics from the execution. If no file name is present, then this
    function automatically prints a simple profiling report, sorted by the
    standard name string (file/line/function-name) that is presented in
    each line.
    """
    prof = Profile()
    result = None
    try:
        try:
            prof = prof.run(statement)
        except SystemExit:
            pass
    finally:
        if filename is not None:
            prof.dump_stats(filename)
        else:
            result = prof.print_stats(sort)
    return result

def runctx(statement, globals, locals, filename=None, sort=-1):
    """Run statement under profiler, supplying your own globals and locals,
    optionally saving results in filename.

    statement and filename have the same semantics as profile.run
    """
    prof = Profile()
    result = None
    try:
        try:
            prof = prof.runctx(statement, globals, locals)
        except SystemExit:
            pass
    finally:
        if filename is not None:
            prof.dump_stats(filename)
        else:
            result = prof.print_stats(sort)
    return result

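# Example (not part of the original module; the stats filename is
# hypothetical): profile a statement, save the stats, then inspect the
# five most expensive entries with pstats:
#
#   import cProfile
#   cProfile.run('sum(x * x for x in range(10000))', 'out.prof')
#
#   import pstats
#   pstats.Stats('out.prof').sort_stats('cumulative').print_stats(5)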
# Backwards compatibility.
def help():
    print "Documentation for the profile/cProfile modules can be found "
    print "in the Python Library Reference, section 'The Python Profiler'."

# ____________________________________________________________

class Profile(_lsprof.Profiler):
    """Profile(custom_timer=None, time_unit=None, subcalls=True, builtins=True)

    Builds a profiler object using the specified timer function.
    The default timer is a fast built-in one based on real time.
    For custom timer functions returning integers, time_unit can
    be a float specifying a scale (i.e. how long each integer unit
    is, in seconds).
    """

    # Most of the functionality is in the base class.
    # This subclass only adds convenient and backward-compatible methods.

    def print_stats(self, sort=-1):
        import pstats
        pstats.Stats(self).strip_dirs().sort_stats(sort).print_stats()

    def dump_stats(self, file):
        import marshal
        f = open(file, 'wb')
        self.create_stats()
        marshal.dump(self.stats, f)
        f.close()

    def create_stats(self):
        self.disable()
        self.snapshot_stats()

    def snapshot_stats(self):
        entries = self.getstats()
        self.stats = {}
        callersdicts = {}
        # call information
        for entry in entries:
            func = label(entry.code)
            nc = entry.callcount          # ncalls column of pstats (before '/')
            cc = nc - entry.reccallcount  # ncalls column of pstats (after '/')
            tt = entry.inlinetime         # tottime column of pstats
            ct = entry.totaltime          # cumtime column of pstats
            callers = {}
            callersdicts[id(entry.code)] = callers
            self.stats[func] = cc, nc, tt, ct, callers
        # subcall information
        for entry in entries:
            if entry.calls:
                func = label(entry.code)
                for subentry in entry.calls:
                    try:
                        callers = callersdicts[id(subentry.code)]
                    except KeyError:
                        continue
                    nc = subentry.callcount
                    cc = nc - subentry.reccallcount
                    tt = subentry.inlinetime
                    ct = subentry.totaltime
                    if func in callers:
                        prev = callers[func]
                        nc += prev[0]
                        cc += prev[1]
                        tt += prev[2]
                        ct += prev[3]
                    callers[func] = nc, cc, tt, ct

    # The following two methods can be called by clients to use
    # a profiler to profile a statement, given as a string.

    def run(self, cmd):
        import __main__
        dict = __main__.__dict__
        return self.runctx(cmd, dict, dict)

    def runctx(self, cmd, globals, locals):
        self.enable()
        try:
            exec cmd in globals, locals
        finally:
            self.disable()
        return self

    # This method is more useful to profile a single function call.
    def runcall(self, func, *args, **kw):
        self.enable()
        try:
            return func(*args, **kw)
        finally:
            self.disable()

# ____________________________________________________________

def label(code):
    if isinstance(code, str):
        return ('~', 0, code)    # built-in functions ('~' sorts at the end)
    else:
        return (code.co_filename, code.co_firstlineno, code.co_name)

# ____________________________________________________________

def main():
    import os, sys
    from optparse import OptionParser
    usage = "cProfile.py [-o output_file_path] [-s sort] scriptfile [arg] ..."
    parser = OptionParser(usage=usage)
    parser.allow_interspersed_args = False
    parser.add_option('-o', '--outfile', dest="outfile",
        help="Save stats to <outfile>", default=None)
    parser.add_option('-s', '--sort', dest="sort",
        help="Sort order when printing to stdout, based on pstats.Stats class",
        default=-1)

    if not sys.argv[1:]:
        parser.print_usage()
        sys.exit(2)

    (options, args) = parser.parse_args()
    sys.argv[:] = args

    if len(args) > 0:
        progname = args[0]
        sys.path.insert(0, os.path.dirname(progname))
        with open(progname, 'rb') as fp:
            code = compile(fp.read(), progname, 'exec')
        globs = {
            '__file__': progname,
            '__name__': '__main__',
            '__package__': None,
        }
        runctx(code, globs, None, options.outfile, options.sort)
    else:
        parser.print_usage()
    return parser

# When invoked as main program, invoke the profiler on a script
if __name__ == '__main__':
    main()
Binary file not shown.
713 PythonHome/Lib/calendar.py Normal file
@ -0,0 +1,713 @@
"""Calendar printing functions

Note when comparing these calendars to the ones printed by cal(1): By
default, these calendars have Monday as the first day of the week, and
Sunday as the last (the European convention). Use setfirstweekday() to
set the first day of the week (0=Monday, 6=Sunday)."""

import sys
import datetime
import locale as _locale

__all__ = ["IllegalMonthError", "IllegalWeekdayError", "setfirstweekday",
           "firstweekday", "isleap", "leapdays", "weekday", "monthrange",
           "monthcalendar", "prmonth", "month", "prcal", "calendar",
           "timegm", "month_name", "month_abbr", "day_name", "day_abbr"]

# Exception raised for bad input (with string parameter for details)
error = ValueError

# Exceptions raised for bad input
class IllegalMonthError(ValueError):
    def __init__(self, month):
        self.month = month
    def __str__(self):
        return "bad month number %r; must be 1-12" % self.month


class IllegalWeekdayError(ValueError):
    def __init__(self, weekday):
        self.weekday = weekday
    def __str__(self):
        return "bad weekday number %r; must be 0 (Monday) to 6 (Sunday)" % self.weekday


# Constants for months referenced later
January = 1
February = 2

# Number of days per month (except for February in leap years)
mdays = [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]

# This module used to have hard-coded lists of day and month names, as
# English strings.  The classes following emulate a read-only version of
# that, but supply localized names.  Note that the values are computed
# fresh on each call, in case the user changes locale between calls.

class _localized_month:

    _months = [datetime.date(2001, i+1, 1).strftime for i in range(12)]
    _months.insert(0, lambda x: "")

    def __init__(self, format):
        self.format = format

    def __getitem__(self, i):
        funcs = self._months[i]
        if isinstance(i, slice):
            return [f(self.format) for f in funcs]
        else:
            return funcs(self.format)

    def __len__(self):
        return 13


class _localized_day:

    # January 1, 2001, was a Monday.
    _days = [datetime.date(2001, 1, i+1).strftime for i in range(7)]

    def __init__(self, format):
        self.format = format

    def __getitem__(self, i):
        funcs = self._days[i]
        if isinstance(i, slice):
            return [f(self.format) for f in funcs]
        else:
            return funcs(self.format)

    def __len__(self):
        return 7


# Full and abbreviated names of weekdays
day_name = _localized_day('%A')
day_abbr = _localized_day('%a')

# Full and abbreviated names of months (1-based arrays!!!)
month_name = _localized_month('%B')
month_abbr = _localized_month('%b')

# Constants for weekdays
(MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY) = range(7)


def isleap(year):
    """Return True for leap years, False for non-leap years."""
    return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)


def leapdays(y1, y2):
    """Return number of leap years in range [y1, y2).
       Assume y1 <= y2."""
    y1 -= 1
    y2 -= 1
    return (y2//4 - y1//4) - (y2//100 - y1//100) + (y2//400 - y1//400)
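# Example (not part of the original module):
#
#   >>> isleap(2000)          # divisible by 400, so a leap year
#   True
#   >>> isleap(1900)          # divisible by 100 but not by 400
#   False
#   >>> leapdays(2000, 2100)  # leap years in [2000, 2100)
#   25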

def weekday(year, month, day):
    """Return weekday (0-6 ~ Mon-Sun) for year (1970-...), month (1-12),
       day (1-31)."""
    return datetime.date(year, month, day).weekday()


def monthrange(year, month):
    """Return weekday (0-6 ~ Mon-Sun) and number of days (28-31) for
       year, month."""
    if not 1 <= month <= 12:
        raise IllegalMonthError(month)
    day1 = weekday(year, month, 1)
    ndays = mdays[month] + (month == February and isleap(year))
    return day1, ndays


class Calendar(object):
    """
    Base calendar class. This class doesn't do any formatting. It simply
    provides data to subclasses.
    """

    def __init__(self, firstweekday=0):
        self.firstweekday = firstweekday # 0 = Monday, 6 = Sunday

    def getfirstweekday(self):
        return self._firstweekday % 7

    def setfirstweekday(self, firstweekday):
        self._firstweekday = firstweekday

    firstweekday = property(getfirstweekday, setfirstweekday)

    def iterweekdays(self):
        """
        Return an iterator for one week of weekday numbers starting with the
        configured first one.
        """
        for i in range(self.firstweekday, self.firstweekday + 7):
            yield i%7

    def itermonthdates(self, year, month):
        """
        Return an iterator for one month. The iterator will yield datetime.date
        values and will always iterate through complete weeks, so it will yield
        dates outside the specified month.
        """
        date = datetime.date(year, month, 1)
        # Go back to the beginning of the week
        days = (date.weekday() - self.firstweekday) % 7
        date -= datetime.timedelta(days=days)
        oneday = datetime.timedelta(days=1)
        while True:
            yield date
            try:
                date += oneday
            except OverflowError:
                # Adding one day could fail after datetime.MAXYEAR
                break
            if date.month != month and date.weekday() == self.firstweekday:
                break

    def itermonthdays2(self, year, month):
        """
        Like itermonthdates(), but will yield (day number, weekday number)
        tuples. For days outside the specified month the day number is 0.
        """
        for date in self.itermonthdates(year, month):
            if date.month != month:
                yield (0, date.weekday())
            else:
                yield (date.day, date.weekday())

    def itermonthdays(self, year, month):
        """
        Like itermonthdates(), but will yield day numbers. For days outside
        the specified month the day number is 0.
        """
        for date in self.itermonthdates(year, month):
            if date.month != month:
                yield 0
            else:
                yield date.day

    def monthdatescalendar(self, year, month):
        """
        Return a matrix (list of lists) representing a month's calendar.
        Each row represents a week; week entries are datetime.date values.
        """
        dates = list(self.itermonthdates(year, month))
        return [ dates[i:i+7] for i in range(0, len(dates), 7) ]

    def monthdays2calendar(self, year, month):
        """
        Return a matrix representing a month's calendar.
        Each row represents a week; week entries are
        (day number, weekday number) tuples. Day numbers outside this month
        are zero.
        """
        days = list(self.itermonthdays2(year, month))
        return [ days[i:i+7] for i in range(0, len(days), 7) ]

    def monthdayscalendar(self, year, month):
        """
        Return a matrix representing a month's calendar.
        Each row represents a week; days outside this month are zero.
        """
        days = list(self.itermonthdays(year, month))
        return [ days[i:i+7] for i in range(0, len(days), 7) ]

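    # Example (not part of the original module). July 1, 2014 fell on a
    # Tuesday, so with the default firstweekday=0 (Monday) the month matrix
    # pads the first and last weeks with zeros:
    #
    #   >>> Calendar().monthdayscalendar(2014, 7)
    #   [[0, 1, 2, 3, 4, 5, 6],
    #    [7, 8, 9, 10, 11, 12, 13],
    #    [14, 15, 16, 17, 18, 19, 20],
    #    [21, 22, 23, 24, 25, 26, 27],
    #    [28, 29, 30, 31, 0, 0, 0]]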
    def yeardatescalendar(self, year, width=3):
        """
        Return the data for the specified year ready for formatting. The return
        value is a list of month rows. Each month row contains up to width months.
        Each month contains between 4 and 6 weeks and each week contains 1-7
        days. Days are datetime.date objects.
        """
        months = [
            self.monthdatescalendar(year, i)
            for i in range(January, January+12)
        ]
        return [months[i:i+width] for i in range(0, len(months), width) ]

    def yeardays2calendar(self, year, width=3):
        """
        Return the data for the specified year ready for formatting (similar to
        yeardatescalendar()). Entries in the week lists are
        (day number, weekday number) tuples. Day numbers outside this month are
        zero.
        """
        months = [
            self.monthdays2calendar(year, i)
            for i in range(January, January+12)
        ]
        return [months[i:i+width] for i in range(0, len(months), width) ]

    def yeardayscalendar(self, year, width=3):
        """
        Return the data for the specified year ready for formatting (similar to
        yeardatescalendar()). Entries in the week lists are day numbers.
        Day numbers outside this month are zero.
        """
        months = [
            self.monthdayscalendar(year, i)
            for i in range(January, January+12)
        ]
        return [months[i:i+width] for i in range(0, len(months), width) ]

||||
class TextCalendar(Calendar):
|
||||
"""
|
||||
Subclass of Calendar that outputs a calendar as a simple plain text
|
||||
similar to the UNIX program cal.
|
||||
"""
|
||||
|
||||
def prweek(self, theweek, width):
|
||||
"""
|
||||
Print a single week (no newline).
|
||||
"""
|
||||
print self.formatweek(theweek, width),
|
||||
|
||||
def formatday(self, day, weekday, width):
|
||||
"""
|
||||
Returns a formatted day.
|
||||
"""
|
||||
if day == 0:
|
||||
s = ''
|
||||
else:
|
||||
s = '%2i' % day # right-align single-digit days
|
||||
return s.center(width)
|
||||
|
||||
def formatweek(self, theweek, width):
|
||||
"""
|
||||
Returns a single week in a string (no newline).
|
||||
"""
|
||||
return ' '.join(self.formatday(d, wd, width) for (d, wd) in theweek)
|
||||
|
||||
def formatweekday(self, day, width):
|
||||
"""
|
||||
Returns a formatted week day name.
|
||||
"""
|
||||
if width >= 9:
|
||||
names = day_name
|
||||
else:
|
||||
names = day_abbr
|
||||
return names[day][:width].center(width)
|
||||
|
||||
def formatweekheader(self, width):
|
||||
"""
|
||||
Return a header for a week.
|
||||
"""
|
||||
return ' '.join(self.formatweekday(i, width) for i in self.iterweekdays())
|
||||
|
||||
def formatmonthname(self, theyear, themonth, width, withyear=True):
|
||||
"""
|
||||
Return a formatted month name.
|
||||
"""
|
||||
s = month_name[themonth]
|
||||
if withyear:
|
||||
s = "%s %r" % (s, theyear)
|
||||
return s.center(width)
|
||||
|
||||
def prmonth(self, theyear, themonth, w=0, l=0):
|
||||
"""
|
||||
Print a month's calendar.
|
||||
"""
|
||||
print self.formatmonth(theyear, themonth, w, l),
|
||||
|
||||
def formatmonth(self, theyear, themonth, w=0, l=0):
|
||||
"""
|
||||
Return a month's calendar string (multi-line).
|
||||
"""
|
||||
w = max(2, w)
|
||||
l = max(1, l)
|
||||
s = self.formatmonthname(theyear, themonth, 7 * (w + 1) - 1)
|
||||
s = s.rstrip()
|
||||
s += '\n' * l
|
||||
s += self.formatweekheader(w).rstrip()
|
||||
s += '\n' * l
|
||||
for week in self.monthdays2calendar(theyear, themonth):
|
||||
s += self.formatweek(week, w).rstrip()
|
||||
s += '\n' * l
|
||||
return s
|
||||
|
||||
def formatyear(self, theyear, w=2, l=1, c=6, m=3):
|
||||
"""
|
||||
Returns a year's calendar as a multi-line string.
|
||||
"""
|
||||
w = max(2, w)
|
||||
l = max(1, l)
|
||||
c = max(2, c)
|
||||
colwidth = (w + 1) * 7 - 1
|
||||
v = []
|
||||
a = v.append
|
||||
a(repr(theyear).center(colwidth*m+c*(m-1)).rstrip())
|
||||
a('\n'*l)
|
||||
header = self.formatweekheader(w)
|
||||
for (i, row) in enumerate(self.yeardays2calendar(theyear, m)):
|
||||
# months in this row
|
||||
months = range(m*i+1, min(m*(i+1)+1, 13))
|
||||
a('\n'*l)
|
||||
names = (self.formatmonthname(theyear, k, colwidth, False)
|
||||
for k in months)
|
||||
a(formatstring(names, colwidth, c).rstrip())
|
||||
a('\n'*l)
|
||||
headers = (header for k in months)
|
||||
a(formatstring(headers, colwidth, c).rstrip())
|
||||
a('\n'*l)
|
||||
# max number of weeks for this row
|
||||
height = max(len(cal) for cal in row)
|
||||
for j in range(height):
|
||||
weeks = []
|
||||
for cal in row:
|
||||
if j >= len(cal):
|
||||
weeks.append('')
|
||||
else:
|
||||
weeks.append(self.formatweek(cal[j], w))
|
||||
a(formatstring(weeks, colwidth, c).rstrip())
|
||||
a('\n' * l)
|
||||
return ''.join(v)
|
||||
|
||||
def pryear(self, theyear, w=0, l=0, c=6, m=3):
|
||||
"""Print a year's calendar."""
|
||||
print self.formatyear(theyear, w, l, c, m)
|
||||
|
||||
|
||||
class HTMLCalendar(Calendar):
|
||||
"""
|
||||
This calendar returns complete HTML pages.
|
||||
"""
|
||||
|
||||
# CSS classes for the day <td>s
|
||||
cssclasses = ["mon", "tue", "wed", "thu", "fri", "sat", "sun"]
|
||||
|
||||
def formatday(self, day, weekday):
|
||||
"""
|
||||
Return a day as a table cell.
|
||||
"""
|
||||
if day == 0:
|
||||
return '<td class="noday"> </td>' # day outside month
|
||||
else:
|
||||
return '<td class="%s">%d</td>' % (self.cssclasses[weekday], day)
|
||||
|
||||
def formatweek(self, theweek):
|
||||
"""
|
||||
Return a complete week as a table row.
|
||||
"""
|
||||
s = ''.join(self.formatday(d, wd) for (d, wd) in theweek)
|
||||
return '<tr>%s</tr>' % s
|
||||
|
||||
def formatweekday(self, day):
|
||||
"""
|
||||
Return a weekday name as a table header.
|
||||
"""
|
||||
return '<th class="%s">%s</th>' % (self.cssclasses[day], day_abbr[day])
|
||||
|
||||
def formatweekheader(self):
|
||||
"""
|
||||
Return a header for a week as a table row.
|
||||
"""
|
||||
s = ''.join(self.formatweekday(i) for i in self.iterweekdays())
|
||||
return '<tr>%s</tr>' % s
|
||||
|
||||
def formatmonthname(self, theyear, themonth, withyear=True):
|
||||
"""
|
||||
Return a month name as a table row.
|
||||
"""
|
||||
if withyear:
|
||||
s = '%s %s' % (month_name[themonth], theyear)
|
||||
else:
|
||||
s = '%s' % month_name[themonth]
|
||||
return '<tr><th colspan="7" class="month">%s</th></tr>' % s
|
||||
|
||||
def formatmonth(self, theyear, themonth, withyear=True):
|
||||
"""
|
||||
Return a formatted month as a table.
|
||||
"""
|
||||
v = []
|
||||
a = v.append
|
||||
a('<table border="0" cellpadding="0" cellspacing="0" class="month">')
|
||||
a('\n')
|
||||
a(self.formatmonthname(theyear, themonth, withyear=withyear))
|
||||
a('\n')
|
||||
a(self.formatweekheader())
|
||||
a('\n')
|
||||
for week in self.monthdays2calendar(theyear, themonth):
|
||||
a(self.formatweek(week))
|
||||
a('\n')
|
||||
a('</table>')
|
||||
a('\n')
|
||||
return ''.join(v)
|
||||
|
||||
def formatyear(self, theyear, width=3):
|
||||
"""
|
||||
Return a formatted year as a table of tables.
|
||||
"""
|
||||
v = []
|
||||
a = v.append
|
||||
width = max(width, 1)
|
||||
a('<table border="0" cellpadding="0" cellspacing="0" class="year">')
|
||||
a('\n')
|
||||
a('<tr><th colspan="%d" class="year">%s</th></tr>' % (width, theyear))
|
||||
for i in range(January, January+12, width):
|
||||
# months in this row
|
||||
months = range(i, min(i+width, 13))
|
||||
a('<tr>')
|
||||
for m in months:
|
||||
a('<td>')
|
||||
a(self.formatmonth(theyear, m, withyear=False))
|
||||
a('</td>')
|
||||
a('</tr>')
|
||||
a('</table>')
|
||||
return ''.join(v)
|
||||
|
||||
def formatyearpage(self, theyear, width=3, css='calendar.css', encoding=None):
|
||||
"""
|
||||
Return a formatted year as a complete HTML page.
|
||||
"""
|
||||
if encoding is None:
|
||||
encoding = sys.getdefaultencoding()
|
||||
v = []
|
||||
a = v.append
|
||||
a('<?xml version="1.0" encoding="%s"?>\n' % encoding)
|
||||
a('<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">\n')
|
||||
a('<html>\n')
|
||||
a('<head>\n')
|
||||
a('<meta http-equiv="Content-Type" content="text/html; charset=%s" />\n' % encoding)
|
||||
if css is not None:
|
||||
a('<link rel="stylesheet" type="text/css" href="%s" />\n' % css)
|
||||
a('<title>Calendar for %d</title>\n' % theyear)
|
||||
a('</head>\n')
|
||||
a('<body>\n')
|
||||
a(self.formatyear(theyear, width))
|
||||
a('</body>\n')
|
||||
a('</html>\n')
|
||||
return ''.join(v).encode(encoding, "xmlcharrefreplace")
|
||||
|
||||
|
||||
class TimeEncoding:
|
||||
def __init__(self, locale):
|
||||
self.locale = locale
|
||||
|
||||
def __enter__(self):
|
||||
self.oldlocale = _locale.getlocale(_locale.LC_TIME)
|
||||
_locale.setlocale(_locale.LC_TIME, self.locale)
|
||||
return _locale.getlocale(_locale.LC_TIME)[1]
|
||||
|
||||
def __exit__(self, *args):
|
||||
_locale.setlocale(_locale.LC_TIME, self.oldlocale)
|
||||
|
||||
|
||||
class LocaleTextCalendar(TextCalendar):
|
||||
"""
|
||||
This class can be passed a locale name in the constructor and will return
|
||||
month and weekday names in the specified locale. If this locale includes
|
||||
an encoding all strings containing month and weekday names will be returned
|
||||
as unicode.
|
||||
"""
|
||||
|
||||
def __init__(self, firstweekday=0, locale=None):
|
||||
TextCalendar.__init__(self, firstweekday)
|
||||
if locale is None:
|
||||
locale = _locale.getdefaultlocale()
|
||||
self.locale = locale
|
||||
|
||||
def formatweekday(self, day, width):
|
||||
with TimeEncoding(self.locale) as encoding:
|
||||
if width >= 9:
|
||||
names = day_name
|
||||
else:
|
||||
names = day_abbr
|
||||
name = names[day]
|
||||
if encoding is not None:
|
||||
name = name.decode(encoding)
|
||||
return name[:width].center(width)
|
||||
|
||||
def formatmonthname(self, theyear, themonth, width, withyear=True):
|
||||
with TimeEncoding(self.locale) as encoding:
|
||||
s = month_name[themonth]
|
||||
if encoding is not None:
|
||||
s = s.decode(encoding)
|
||||
if withyear:
|
||||
s = "%s %r" % (s, theyear)
|
||||
return s.center(width)
|
||||
|
||||
|
||||
class LocaleHTMLCalendar(HTMLCalendar):
|
||||
"""
|
||||
This class can be passed a locale name in the constructor and will return
|
||||
month and weekday names in the specified locale. If this locale includes
|
||||
an encoding all strings containing month and weekday names will be returned
|
||||
as unicode.
|
||||
"""
|
||||
def __init__(self, firstweekday=0, locale=None):
|
||||
HTMLCalendar.__init__(self, firstweekday)
|
||||
if locale is None:
|
||||
locale = _locale.getdefaultlocale()
|
||||
self.locale = locale
|
||||
|
||||
def formatweekday(self, day):
|
||||
with TimeEncoding(self.locale) as encoding:
|
||||
s = day_abbr[day]
|
||||
if encoding is not None:
|
||||
s = s.decode(encoding)
|
||||
return '<th class="%s">%s</th>' % (self.cssclasses[day], s)
|
||||
|
||||
def formatmonthname(self, theyear, themonth, withyear=True):
|
||||
with TimeEncoding(self.locale) as encoding:
|
||||
s = month_name[themonth]
|
||||
if encoding is not None:
|
||||
s = s.decode(encoding)
|
||||
if withyear:
|
||||
s = '%s %s' % (s, theyear)
|
||||
return '<tr><th colspan="7" class="month">%s</th></tr>' % s
|
||||
|
||||
|
||||
# Support for old module level interface
|
||||
c = TextCalendar()
|
||||
|
||||
firstweekday = c.getfirstweekday
|
||||
|
||||
def setfirstweekday(firstweekday):
|
||||
try:
|
||||
firstweekday.__index__
|
||||
except AttributeError:
|
||||
raise IllegalWeekdayError(firstweekday)
|
||||
if not MONDAY <= firstweekday <= SUNDAY:
|
||||
raise IllegalWeekdayError(firstweekday)
|
||||
c.firstweekday = firstweekday
|
||||
|
||||
monthcalendar = c.monthdayscalendar
|
||||
prweek = c.prweek
|
||||
week = c.formatweek
|
||||
weekheader = c.formatweekheader
|
||||
prmonth = c.prmonth
|
||||
month = c.formatmonth
|
||||
calendar = c.formatyear
|
||||
prcal = c.pryear
|
||||
|
||||
|
||||
# Spacing of month columns for multi-column year calendar
|
||||
_colwidth = 7*3 - 1 # Amount printed by prweek()
|
||||
_spacing = 6 # Number of spaces between columns
|
||||
|
||||
|
||||
def format(cols, colwidth=_colwidth, spacing=_spacing):
|
||||
"""Prints multi-column formatting for year calendars"""
|
||||
print formatstring(cols, colwidth, spacing)
|
||||
|
||||
|
||||
def formatstring(cols, colwidth=_colwidth, spacing=_spacing):
|
||||
"""Returns a string formatted from n strings, centered within n columns."""
|
||||
spacing *= ' '
|
||||
return spacing.join(c.center(colwidth) for c in cols)
|
||||
|
||||
|
EPOCH = 1970
_EPOCH_ORD = datetime.date(EPOCH, 1, 1).toordinal()


def timegm(tuple):
    """Unrelated but handy function to calculate Unix timestamp from GMT."""
    year, month, day, hour, minute, second = tuple[:6]
    days = datetime.date(year, month, 1).toordinal() - _EPOCH_ORD + day - 1
    hours = days*24 + hour
    minutes = hours*60 + minute
    seconds = minutes*60 + second
    return seconds

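# Example (not part of the original module):
#
#   >>> timegm((1970, 1, 2, 0, 0, 0))   # one day after the epoch
#   86400
#   >>> timegm((2014, 7, 1, 0, 0, 0))
#   1404172800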
||||
|
||||
def main(args):
|
||||
import optparse
|
||||
parser = optparse.OptionParser(usage="usage: %prog [options] [year [month]]")
|
||||
parser.add_option(
|
||||
"-w", "--width",
|
||||
dest="width", type="int", default=2,
|
||||
help="width of date column (default 2, text only)"
|
||||
)
|
||||
parser.add_option(
|
||||
"-l", "--lines",
|
||||
dest="lines", type="int", default=1,
|
||||
help="number of lines for each week (default 1, text only)"
|
||||
)
|
||||
parser.add_option(
|
||||
"-s", "--spacing",
|
||||
dest="spacing", type="int", default=6,
|
||||
help="spacing between months (default 6, text only)"
|
||||
)
|
||||
parser.add_option(
|
||||
"-m", "--months",
|
||||
dest="months", type="int", default=3,
|
||||
help="months per row (default 3, text only)"
|
||||
)
|
||||
parser.add_option(
|
||||
"-c", "--css",
|
||||
dest="css", default="calendar.css",
|
||||
help="CSS to use for page (html only)"
|
||||
)
|
||||
parser.add_option(
|
||||
"-L", "--locale",
|
||||
dest="locale", default=None,
|
||||
help="locale to be used from month and weekday names"
|
||||
)
|
||||
parser.add_option(
|
||||
"-e", "--encoding",
|
||||
dest="encoding", default=None,
|
||||
help="Encoding to use for output"
|
||||
)
|
||||
parser.add_option(
|
||||
"-t", "--type",
|
||||
dest="type", default="text",
|
||||
choices=("text", "html"),
|
||||
help="output type (text or html)"
|
||||
)
|
||||
|
||||
(options, args) = parser.parse_args(args)
|
||||
|
||||
if options.locale and not options.encoding:
|
||||
parser.error("if --locale is specified --encoding is required")
|
||||
sys.exit(1)
|
||||
|
||||
locale = options.locale, options.encoding
|
||||
|
||||
if options.type == "html":
|
||||
if options.locale:
|
||||
cal = LocaleHTMLCalendar(locale=locale)
|
||||
else:
|
||||
cal = HTMLCalendar()
|
||||
encoding = options.encoding
|
||||
if encoding is None:
|
||||
encoding = sys.getdefaultencoding()
|
||||
optdict = dict(encoding=encoding, css=options.css)
|
||||
if len(args) == 1:
|
||||
print cal.formatyearpage(datetime.date.today().year, **optdict)
|
||||
elif len(args) == 2:
|
||||
print cal.formatyearpage(int(args[1]), **optdict)
|
||||
else:
|
||||
parser.error("incorrect number of arguments")
|
||||
sys.exit(1)
|
||||
else:
|
||||
if options.locale:
|
||||
cal = LocaleTextCalendar(locale=locale)
|
||||
else:
|
||||
cal = TextCalendar()
|
||||
optdict = dict(w=options.width, l=options.lines)
|
||||
if len(args) != 3:
|
||||
optdict["c"] = options.spacing
|
||||
optdict["m"] = options.months
|
||||
if len(args) == 1:
|
||||
result = cal.formatyear(datetime.date.today().year, **optdict)
|
||||
elif len(args) == 2:
|
||||
result = cal.formatyear(int(args[1]), **optdict)
|
||||
elif len(args) == 3:
|
||||
result = cal.formatmonth(int(args[1]), int(args[2]), **optdict)
|
||||
else:
|
||||
parser.error("incorrect number of arguments")
|
||||
sys.exit(1)
|
||||
if options.encoding:
|
||||
result = result.encode(options.encoding)
|
||||
print result
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main(sys.argv)
|
Binary file not shown.
1059 PythonHome/Lib/cgi.py Normal file
File diff suppressed because it is too large.
Some files were not shown because too many files have changed in this diff.