added reset to default window splitter

parent 81675dec77
commit aace0092fa

322 changed files with 21374 additions and 2 deletions
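
The change itself lives in the bundled feedthemonkey script below: MainWindow.resetSplitter is wired to a new "Default" entry in the Window menu (Ctrl+D) and restores the vertical splitter to a default layout, giving the article list 200 px and the content pane the remainder. A minimal standalone sketch of that logic (the function name here is illustrative, not part of the commit):

def reset_splitter(splitter, list_height=200):
    # Keep the total height of the two panes, pin the top (list) pane
    # to list_height, and hand whatever remains to the content pane.
    sizes = splitter.sizes()
    total = sizes[0] + sizes[1]
    splitter.setSizes([list_height, total - list_height])
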
BIN  FeedTheMonkey.app/Contents/Resources/Icon.icns  Normal file
Binary file not shown.

43  FeedTheMonkey.app/Contents/Resources/__boot__.py  Normal file
@@ -0,0 +1,43 @@
def _reset_sys_path():
    # Clear generic sys.path[0]
    import sys, os
    resources = os.environ['RESOURCEPATH']
    while sys.path[0] == resources:
        del sys.path[0]
_reset_sys_path()


def _chdir_resource():
    import os
    os.chdir(os.environ['RESOURCEPATH'])
_chdir_resource()


def _disable_linecache():
    import linecache
    def fake_getline(*args, **kwargs):
        return ''
    linecache.orig_getline = linecache.getline
    linecache.getline = fake_getline
_disable_linecache()


def _run():
    global __file__
    import os, sys, site
    sys.frozen = 'macosx_app'
    base = os.environ['RESOURCEPATH']

    argv0 = os.path.basename(os.environ['ARGVZERO'])
    script = SCRIPT_MAP.get(argv0, DEFAULT_SCRIPT)

    path = os.path.join(base, script)
    sys.argv[0] = __file__ = path
    with open(path, 'rU') as fp:
        source = fp.read() + "\n"
    exec(compile(source, path, 'exec'), globals(), globals())


DEFAULT_SCRIPT='feedthemonkey'
SCRIPT_MAP={}
_run()

19  FeedTheMonkey.app/Contents/Resources/__error__.sh  Executable file
@@ -0,0 +1,19 @@
#!/bin/sh
#
# This is the default apptemplate error script
#
if ( test -n "$2" ) ; then
    echo "$1 Error"
    echo "An unexpected error has occurred during execution of the main script"
    echo ""
    echo "$2: $3"
    echo ""
    echo "See the Console for a detailed traceback."
else
    echo "$1 Error"

    # Usage: ERRORURL <anURL> <a button label>, this is used by the
    # bundle runner to put up a dialog.
    #echo "ERRORURL: http://www.python.org/ Visit the Python Website
    # echo "ERRORURL: http://homepages.cwi.nl/~jack/macpython/index.html Visit the MacPython Website"
fi

537  FeedTheMonkey.app/Contents/Resources/feedthemonkey  Executable file
@@ -0,0 +1,537 @@
#!/usr/bin/env python2

import sys, os, json, tempfile, urllib2, urllib, json
from PyQt4 import QtGui, QtCore, QtWebKit, QtNetwork
from threading import Thread
from sys import platform as _platform

settings = QtCore.QSettings("jabs.nu", "feedthemonkey")

class MainWindow(QtGui.QMainWindow):
    def __init__(self):
        QtGui.QMainWindow.__init__(self)
        self.setWindowIcon(QtGui.QIcon("feedmonkey"))
        self.addAction(QtGui.QAction("Full Screen", self, checkable=True, toggled=lambda v: self.showFullScreen() if v else self.showNormal(), shortcut="F11"))
        self.history = self.get("history", [])
        self.restoreGeometry(QtCore.QByteArray.fromRawData(settings.value("geometry").toByteArray()))
        self.restoreState(QtCore.QByteArray.fromRawData(settings.value("state").toByteArray()))

        self.initUI()

        session_id = self.get("session_id")
        server_url = self.get("server_url")

        if not (session_id and server_url):
            self.authenticate()
        else:
            self.initApp()


    def initUI(self):
        self.list = List(self)
        self.content = Content(self)

        self.splitter = QtGui.QSplitter(QtCore.Qt.Vertical, self)
        self.splitter.setHandleWidth(1)
        self.splitter.addWidget(self.list)
        self.splitter.addWidget(self.content)
        self.splitter.restoreState(settings.value("splitterSizes").toByteArray());
        self.splitter.splitterMoved.connect(self.splitterMoved)

        self.setCentralWidget(self.splitter)

        def mkAction(name, connect, shortcut=None):
            action = QtGui.QAction(name, self)
            action.triggered.connect(connect)
            if shortcut:
                action.setShortcut(shortcut)
            return action

        mb = self.menuBar()

        fileMenu = mb.addMenu("&File")
        fileMenu.addAction(mkAction("&Close", self.close, "Ctrl+W"))
        fileMenu.addAction(mkAction("&Log Out", self.logOut))
        fileMenu.addSeparator()
        fileMenu.addAction(mkAction("&Exit", self.close, "Ctrl+Q"))

        actionMenu = mb.addMenu("&Action")
        actionMenu.addAction(mkAction("&Reload", self.content.reload, "R"))
        actionMenu.addAction(mkAction("Set &Unread", self.content.setUnread, "U"))
        actionMenu.addAction(mkAction("&Next", self.content.showNext, "J"))
        actionMenu.addAction(mkAction("&Previous", self.content.showPrevious, "K"))
        actionMenu.addAction(mkAction("&Open in Browser", self.content.openCurrent, "N"))

        viewMenu = mb.addMenu("&View")
        viewMenu.addAction(mkAction("Zoom &In", lambda: self.content.wb.setZoomFactor(self.content.wb.zoomFactor() + 0.2), "Ctrl++"))
        viewMenu.addAction(mkAction("Zoom &Out", lambda: self.content.wb.setZoomFactor(self.content.wb.zoomFactor() - 0.2), "Ctrl+-"))
        viewMenu.addAction(mkAction("&Reset", lambda: self.content.wb.setZoomFactor(1), "Ctrl+0"))

        windowMenu = mb.addMenu("&Window")
        windowMenu.addAction(mkAction("Default", self.resetSplitter, "Ctrl+D"))

        helpMenu = mb.addMenu("&Help")
        helpMenu.addAction(mkAction("&About", lambda: QtGui.QDesktopServices.openUrl(QtCore.QUrl("http://jabs.nu/feedthemonkey", QtCore.QUrl.TolerantMode)) ))

    def initApp(self):
        session_id = self.get("session_id")
        server_url = self.get("server_url")
        self.tinyTinyRSS = TinyTinyRSS(self, server_url, session_id)

        self.content.evaluateJavaScript("setArticle('loading')")
        self.content.reload()
        self.show()

    def closeEvent(self, ev):
        settings.setValue("geometry", self.saveGeometry())
        settings.setValue("state", self.saveState())
        return QtGui.QMainWindow.closeEvent(self, ev)

    def put(self, key, value):
        "Persist an object somewhere under a given key"
        settings.setValue(key, json.dumps(value))
        settings.sync()

    def get(self, key, default=None):
        "Get the object stored under 'key' in persistent storage, or the default value"
        v = settings.value(key)
        return json.loads(unicode(v.toString())) if v.isValid() else default

    def setWindowTitle(self, t):
        super(QtGui.QMainWindow, self).setWindowTitle("Feed the Monkey" + t)

    def splitterMoved(self, pos, index):
        settings.setValue("splitterSizes", self.splitter.saveState());

    def resetSplitter(self):
        sizes = self.splitter.sizes()
        top = sizes[0]
        bottom = sizes[1]
        sizes[0] = 200
        sizes[1] = bottom + top - 200
        self.splitter.setSizes(sizes)

    def authenticate(self):

        dialog = Login()

        def callback():

            server_url = str(dialog.textServerUrl.text())
            user = str(dialog.textName.text())
            password = str(dialog.textPass.text())

            session_id = TinyTinyRSS.login(server_url, user, password)
            if session_id:
                self.put("session_id", session_id)
                self.put("server_url", server_url)
                self.initApp()
            else:
                self.authenticate()

        dialog.accepted.connect(callback)

        dialog.exec_()

    def logOut(self):
        self.hide()
        self.content.evaluateJavaScript("setArticle('logout')")
        self.tinyTinyRSS.logOut()
        self.tinyTinyRSS = None
        self.put("session_id", None)
        self.put("server_url", None)
        self.authenticate()

class List(QtGui.QTableWidget):
    def __init__(self, container):
        QtGui.QTableWidget.__init__(self)
        self.setSelectionBehavior(QtGui.QAbstractItemView.SelectRows)
        self.app = container
        self.itemSelectionChanged.connect(self.rowSelected)
        self.setShowGrid(False)

    def initHeader(self):
        self.clear()
        self.setColumnCount(3)
        self.setHorizontalHeaderLabels(("Feed", "Title", "Date"))
        self.horizontalHeader().setResizeMode(0, QtGui.QHeaderView.ResizeToContents)
        self.horizontalHeader().setResizeMode(1, QtGui.QHeaderView.Stretch)
        self.horizontalHeader().setResizeMode(2, QtGui.QHeaderView.ResizeToContents)
        self.verticalHeader().hide()

    def setItems(self, articles):
        self.initHeader()
        self.setRowCount(len(articles))
        row = 0
        for article in articles:
            if "feed_title" in article:
                feed_title = QtGui.QTableWidgetItem(article["feed_title"])
                feed_title.setFlags(QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled)
                self.setItem(row, 0, feed_title)
            if "title" in article:
                title = QtGui.QTableWidgetItem(article["title"])
                title.setFlags(QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled)
                self.setItem(row, 1, title)
            if "updated" in article:
                date = QtCore.QDateTime.fromTime_t(article["updated"]).toString(QtCore.Qt.SystemLocaleShortDate)
                d = QtGui.QTableWidgetItem(date)
                d.setFlags(QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled)
                self.setItem(row, 2, d)
            if "author" in article:
                author = QtGui.QTableWidgetItem(article["author"])
                author.setFlags(QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled)
                self.setItem(row, 3, author)
            self.resizeRowToContents(row)
            row += 1
        self.selectRow(0)

    def rowSelected(self):
        indexes = self.selectedIndexes()
        if len(indexes) > 0:
            row = indexes[0].row()
            self.app.content.showIndex(row)

    def updateRead(self):
        for row, article in enumerate(self.app.content.unread_articles):
            for x in xrange(0,3):
                item = self.item(row, x)
                font = item.font()
                font.setBold(article["unread"])
                item.setFont(font)


class Content(QtGui.QWidget):
    def __init__(self, container):
        QtGui.QWidget.__init__(self)

        self.app = container
        self.index = 0

        self.wb = QtWebKit.QWebView(titleChanged=lambda t: container.setWindowTitle(t))
        self.wb.page().setLinkDelegationPolicy(QtWebKit.QWebPage.DelegateAllLinks)
        self.wb.linkClicked.connect(lambda url: self.openLink(url))

        self.setLayout(QtGui.QVBoxLayout(spacing=0))
        self.layout().setContentsMargins(0, 0, 0, 0)
        self.layout().addWidget(self.wb)

        self.do_show_next = QtGui.QShortcut(QtCore.Qt.Key_Right, self, activated=self.showNext)
        self.do_show_previous = QtGui.QShortcut(QtCore.Qt.Key_Left, self, activated=self.showPrevious)
        self.do_open = QtGui.QShortcut("Return", self, activated=self.openCurrent)

        self.wb.settings().setAttribute(QtWebKit.QWebSettings.PluginsEnabled, True)
        self.wb.settings().setIconDatabasePath(tempfile.mkdtemp())
        self.wb.setHtml(self.templateString())

        self.unread_articles = []

    def openLink(self, url):
        QtGui.QDesktopServices.openUrl(url)

    def reload(self):
        w = WorkerThread(self.app, self._reload)
        self.connect(w, QtCore.SIGNAL("reload_done()"), self.reload_done)
        w.start()

    def setUnread(self):
        article = self.unread_articles[self.index]
        article["unread"] = True
        article["set_unread"] = True
        self.app.list.updateRead()

    def _reload(self):
        self.unread_articles = self.app.tinyTinyRSS.getUnreadFeeds()
        self.index = -1

    def reload_done(self):
        self.setUnreadCount()
        if len(self.unread_articles) > 0:
            self.showNext()
            self.app.list.setItems(self.unread_articles)

    def showIndex(self, index):
        previous = self.unread_articles[self.index]
        if not "set_unread" in previous or not previous["set_unread"]:
            self.app.tinyTinyRSS.setArticleRead(previous["id"])
            previous["unread"] = False
            self.app.list.updateRead()
        else:
            previous["set_unread"] = False
        self.index = index
        current = self.unread_articles[self.index]
        self.setArticle(current)
        self.setUnreadCount()

    def showNext(self):
        if self.index >= 0 and self.index < len(self.unread_articles):
            previous = self.unread_articles[self.index]
            if not "set_unread" in previous or not previous["set_unread"]:
                self.app.tinyTinyRSS.setArticleRead(previous["id"])
                previous["unread"] = False
                self.app.list.updateRead()
            else:
                previous["set_unread"] = False

        if len(self.unread_articles) > self.index + 1:
            self.index += 1
            current = self.unread_articles[self.index]
            self.setArticle(current)
        else:
            if self.index < len(self.unread_articles):
                self.index += 1

        self.setUnreadCount()
        self.app.list.selectRow(self.index)

    def showPrevious(self):
        if self.index > 0:
            self.index -= 1
            previous = self.unread_articles[self.index]
            self.setArticle(previous)
            self.setUnreadCount()
            self.app.list.selectRow(self.index)

    def openCurrent(self):
        current = self.unread_articles[self.index]
        url = QtCore.QUrl(current["link"])
        self.openLink(url)

    def setArticle(self, article):
        func = u"setArticle({});".format(json.dumps(article))
        self.evaluateJavaScript(func)

    def evaluateJavaScript(self, func):
        return self.wb.page().mainFrame().evaluateJavaScript(func)

    def setUnreadCount(self):
        length = len(self.unread_articles)
        i = 0
        if self.index > 0:
            i = self.index
        unread = length - i

        self.app.setWindowTitle(" (" + str(unread) + "/" + str(length) + ")")
        if unread < 1:
            self.evaluateJavaScript("setArticle('empty')")

    def templateString(self):
        html="""
        <!DOCTYPE html>
        <html>
        <head>
            <meta charset="utf-8">
            <title>ttrssl</title>
            <script type="text/javascript">
                function $(id) {
                    return document.getElementById(id);
                }

                function setArticle(article) {
                    window.scrollTo(0, 0);

                    $("date").innerHTML = "";
                    $("title").innerHTML = "";
                    $("title").href = "";
                    $("title").title = "";
                    $("feed_title").innerHTML = "";
                    $("author").innerHTML = "";
                    $("article").innerHTML = "";

                    if(article == "empty") {

                        $("article").innerHTML = "No unread articles to display.";

                    } else if(article == "loading") {

                        $("article").innerHTML = "Loading <blink>…</blink>";

                    } else if (article == "logout") {

                    } else if(article) {

                        $("date").innerHTML = (new Date(parseInt(article.updated, 10) * 1000));
                        $("title").innerHTML = article.title;
                        $("title").href = article.link;
                        $("title").title = article.link;
                        $("feed_title").innerHTML = article.feed_title;
                        $("author").innerHTML = "";
                        if(article.author && article.author.length > 0)
                            $("author").innerHTML = "– " + article.author
                        $("article").innerHTML = article.content;
                    }
                }
            </script>
            <style type="text/css">
                body {
                    font-family: "Ubuntu", "Lucida Grande", "Tahoma", sans-serif;
                    padding: 1em 2em 1em 2em;
                }
                body.darwin {
                    font-family: "LucidaGrande", sans-serif;
                }
                h1 {
                    font-weight: normal;
                    margin: 0;
                    padding: 0;
                }
                header {
                    margin-bottom: 1em;
                    border-bottom: 1px solid #aaa;
                    padding-bottom: 1em;
                }
                header p {
                    color: #aaa;
                    margin: 0;
                    padding: 0
                }
                a {
                    color: #772953;
                    text-decoration: none;
                }
                img {
                    max-width: 100%;
                    height: auto;
                }
                article {
                    line-height: 1.6;
                }
            </style>
        </head>
        <body class='""" + _platform + """''>
            <header>
                <p><span id="feed_title"></span> <span id="author"></span></p>
                <h1><a id="title" href=""></a></h1>
                <p><timedate id="date"></timedate></p>
            </header>
            <article id="article"></article>
        </body>
        </html>"""
        return html # string.replace(html, "<body", "<body class='" + _platform + "'")



class TinyTinyRSS:
    def __init__(self, app, server_url, session_id):
        self.app = app
        if server_url and session_id:
            self.server_url = server_url
            self.session_id = session_id
        else:
            self.app.authenticate()

    def doOperation(self, operation, options=None):
        url = self.server_url + "/api/"
        default_options = {'sid': self.session_id, 'op': operation}
        if options:
            options = dict(default_options.items() + options.items())
        else:
            options = default_options
        json_string = json.dumps(options)
        req = urllib2.Request(url)
        fd = urllib2.urlopen(req, json_string)
        body = ""
        while True:
            data = fd.read(1024)
            if not len(data):
                break
            body += data

        return json.loads(body)["content"]

    def getUnreadFeeds(self):
        unread_articles = []
        def more(skip):
            return self.doOperation("getHeadlines", {"show_excerpt": False, "view_mode": "unread", "show_content": True, "feed_id": -4, "skip": skip})

        skip = 0
        while True:
            new = more( skip)
            unread_articles += new
            length = len(new)

            if length < 1:
                break
            skip += length

        return unread_articles

    def setArticleRead(self, article_id):
        l = lambda: self.doOperation("updateArticle", {'article_ids':article_id, 'mode': 0, 'field': 2})
        t = Thread(target=l)
        t.start()

    def logOut(self):
        self.doOperation("logout")

    @classmethod
    def login(self, server_url, user, password):
        url = server_url + "/api/"
        options = {"op": "login", "user": user, "password": password}
        json_string = json.dumps(options)
        req = urllib2.Request(url)
        fd = urllib2.urlopen(req, json_string)
        body = ""
        while 1:
            data = fd.read(1024)
            if not len(data):
                break
            body += data

        body = json.loads(body)["content"]

        if body.has_key("error"):
            msgBox = QtGui.QMessageBox()
            msgBox.setText(body["error"])
            msgBox.exec_()
            return None

        return body["session_id"]


class Login(QtGui.QDialog):
    def __init__(self):
        QtGui.QDialog.__init__(self)
        self.setWindowIcon(QtGui.QIcon("feedmonkey.png"))
        self.setWindowTitle("Feed the Monkey - Login")

        self.label = QtGui.QLabel(self)
        self.label.setText("Please specify a server url, a username and a password.")

        self.textServerUrl = QtGui.QLineEdit(self)
        self.textServerUrl.setPlaceholderText("http://example.com/ttrss/")
        self.textServerUrl.setText("http://")

        self.textName = QtGui.QLineEdit(self)
        self.textName.setPlaceholderText("username")

        self.textPass = QtGui.QLineEdit(self)
        self.textPass.setEchoMode(QtGui.QLineEdit.Password);
        self.textPass.setPlaceholderText("password")

        self.buttons = QtGui.QDialogButtonBox(QtGui.QDialogButtonBox.Ok)
        self.buttons.accepted.connect(self.accept)

        layout = QtGui.QVBoxLayout(self)
        layout.addWidget(self.label)
        layout.addWidget(self.textServerUrl)
        layout.addWidget(self.textName)
        layout.addWidget(self.textPass)
        layout.addWidget(self.buttons)


class WorkerThread(QtCore.QThread):

    def __init__(self, parent, do_reload):
        super(WorkerThread, self).__init__(parent)
        self.do_reload = do_reload

    def run(self):
        self.do_reload()
        self.emit(QtCore.SIGNAL("reload_done()"))


if __name__ == "__main__":
    app = QtGui.QApplication(sys.argv)
    wb = MainWindow()
    wb.show()
    sys.exit(app.exec_())

1267  FeedTheMonkey.app/Contents/Resources/include/python2.7/pyconfig.h  Normal file
File diff suppressed because it is too large

1397  FeedTheMonkey.app/Contents/Resources/lib/python2.7/config/Makefile  Normal file
File diff suppressed because it is too large

499  FeedTheMonkey.app/Contents/Resources/lib/python2.7/config/Setup  Normal file
@@ -0,0 +1,499 @@
# -*- makefile -*-
|
||||
# The file Setup is used by the makesetup script to construct the files
|
||||
# Makefile and config.c, from Makefile.pre and config.c.in,
|
||||
# respectively. The file Setup itself is initially copied from
|
||||
# Setup.dist; once it exists it will not be overwritten, so you can edit
|
||||
# Setup to your heart's content. Note that Makefile.pre is created
|
||||
# from Makefile.pre.in by the toplevel configure script.
|
||||
|
||||
# (VPATH notes: Setup and Makefile.pre are in the build directory, as
|
||||
# are Makefile and config.c; the *.in and *.dist files are in the source
|
||||
# directory.)
|
||||
|
||||
# Each line in this file describes one or more optional modules.
|
||||
# Modules enabled here will not be compiled by the setup.py script,
|
||||
# so the file can be used to override setup.py's behavior.
|
||||
|
||||
# Lines have the following structure:
|
||||
#
|
||||
# <module> ... [<sourcefile> ...] [<cpparg> ...] [<library> ...]
|
||||
#
|
||||
# <sourcefile> is anything ending in .c (.C, .cc, .c++ are C++ files)
|
||||
# <cpparg> is anything starting with -I, -D, -U or -C
|
||||
# <library> is anything ending in .a or beginning with -l or -L
|
||||
# <module> is anything else but should be a valid Python
|
||||
# identifier (letters, digits, underscores, beginning with non-digit)
|
||||
#
|
||||
# (As the makesetup script changes, it may recognize some other
|
||||
# arguments as well, e.g. *.so and *.sl as libraries. See the big
|
||||
# case statement in the makesetup script.)
|
||||
#
|
||||
# Lines can also have the form
|
||||
#
|
||||
# <name> = <value>
|
||||
#
|
||||
# which defines a Make variable definition inserted into Makefile.in
|
||||
#
|
||||
# Finally, if a line contains just the word "*shared*" (without the
|
||||
# quotes but with the stars), then the following modules will not be
|
||||
# built statically. The build process works like this:
|
||||
#
|
||||
# 1. Build all modules that are declared as static in Modules/Setup,
|
||||
# combine them into libpythonxy.a, combine that into python.
|
||||
# 2. Build all modules that are listed as shared in Modules/Setup.
|
||||
# 3. Invoke setup.py. That builds all modules that
|
||||
# a) are not builtin, and
|
||||
# b) are not listed in Modules/Setup, and
|
||||
# c) can be build on the target
|
||||
#
|
||||
# Therefore, modules declared to be shared will not be
|
||||
# included in the config.c file, nor in the list of objects to be
|
||||
# added to the library archive, and their linker options won't be
|
||||
# added to the linker options. Rules to create their .o files and
|
||||
# their shared libraries will still be added to the Makefile, and
|
||||
# their names will be collected in the Make variable SHAREDMODS. This
|
||||
# is used to build modules as shared libraries. (They can be
|
||||
# installed using "make sharedinstall", which is implied by the
|
||||
# toplevel "make install" target.) (For compatibility,
|
||||
# *noconfig* has the same effect as *shared*.)
|
||||
#
|
||||
# In addition, *static* explicitly declares the following modules to
|
||||
# be static. Lines containing "*static*" and "*shared*" may thus
|
||||
# alternate throughout this file.
|
||||
|
||||
# NOTE: As a standard policy, as many modules as can be supported by a
|
||||
# platform should be present. The distribution comes with all modules
|
||||
# enabled that are supported by most platforms and don't require you
|
||||
# to ftp sources from elsewhere.
|
||||
|
||||
|
||||
# Some special rules to define PYTHONPATH.
|
||||
# Edit the definitions below to indicate which options you are using.
|
||||
# Don't add any whitespace or comments!
|
||||
|
||||
# Directories where library files get installed.
|
||||
# DESTLIB is for Python modules; MACHDESTLIB for shared libraries.
|
||||
DESTLIB=$(LIBDEST)
|
||||
MACHDESTLIB=$(BINLIBDEST)
|
||||
|
||||
# NOTE: all the paths are now relative to the prefix that is computed
|
||||
# at run time!
|
||||
|
||||
# Standard path -- don't edit.
|
||||
# No leading colon since this is the first entry.
|
||||
# Empty since this is now just the runtime prefix.
|
||||
DESTPATH=
|
||||
|
||||
# Site specific path components -- should begin with : if non-empty
|
||||
SITEPATH=
|
||||
|
||||
# Standard path components for test modules
|
||||
TESTPATH=
|
||||
|
||||
# Path components for machine- or system-dependent modules and shared libraries
|
||||
MACHDEPPATH=:plat-$(MACHDEP)
|
||||
EXTRAMACHDEPPATH=
|
||||
|
||||
# Path component for the Tkinter-related modules
|
||||
# The TKPATH variable is always enabled, to save you the effort.
|
||||
TKPATH=:lib-tk
|
||||
|
||||
# Path component for old modules.
|
||||
OLDPATH=:lib-old
|
||||
|
||||
COREPYTHONPATH=$(DESTPATH)$(SITEPATH)$(TESTPATH)$(MACHDEPPATH)$(EXTRAMACHDEPPATH)$(TKPATH)$(OLDPATH)
|
||||
PYTHONPATH=$(COREPYTHONPATH)
|
||||
|
||||
|
||||
# The modules listed here can't be built as shared libraries for
|
||||
# various reasons; therefore they are listed here instead of in the
|
||||
# normal order.
|
||||
|
||||
# This only contains the minimal set of modules required to run the
|
||||
# setup.py script in the root of the Python source tree.
|
||||
|
||||
posix posixmodule.c # posix (UNIX) system calls
|
||||
errno errnomodule.c # posix (UNIX) errno values
|
||||
pwd pwdmodule.c # this is needed to find out the user's home dir
|
||||
# if $HOME is not set
|
||||
_sre _sre.c # Fredrik Lundh's new regular expressions
|
||||
_codecs _codecsmodule.c # access to the builtin codecs and codec registry
|
||||
_weakref _weakref.c # weak references
|
||||
|
||||
# The zipimport module is always imported at startup. Having it as a
|
||||
# builtin module avoids some bootstrapping problems and reduces overhead.
|
||||
zipimport zipimport.c
|
||||
|
||||
# The rest of the modules listed in this file are all commented out by
|
||||
# default. Usually they can be detected and built as dynamically
|
||||
# loaded modules by the new setup.py script added in Python 2.1. If
|
||||
# you're on a platform that doesn't support dynamic loading, want to
|
||||
# compile modules statically into the Python binary, or need to
|
||||
# specify some odd set of compiler switches, you can uncomment the
|
||||
# appropriate lines below.
|
||||
|
||||
# ======================================================================
|
||||
|
||||
# The Python symtable module depends on .h files that setup.py doesn't track
|
||||
_symtable symtablemodule.c
|
||||
|
||||
# The SGI specific GL module:
|
||||
|
||||
GLHACK=-Dclear=__GLclear
|
||||
#gl glmodule.c cgensupport.c -I$(srcdir) $(GLHACK) -lgl -lX11
|
||||
|
||||
# Pure module. Cannot be linked dynamically.
|
||||
# -DWITH_QUANTIFY, -DWITH_PURIFY, or -DWITH_ALL_PURE
|
||||
#WHICH_PURE_PRODUCTS=-DWITH_ALL_PURE
|
||||
#PURE_INCLS=-I/usr/local/include
|
||||
#PURE_STUBLIBS=-L/usr/local/lib -lpurify_stubs -lquantify_stubs
|
||||
#pure puremodule.c $(WHICH_PURE_PRODUCTS) $(PURE_INCLS) $(PURE_STUBLIBS)
|
||||
|
||||
# Uncommenting the following line tells makesetup that all following
|
||||
# modules are to be built as shared libraries (see above for more
|
||||
# detail; also note that *static* reverses this effect):
|
||||
|
||||
#*shared*
|
||||
|
||||
# GNU readline. Unlike previous Python incarnations, GNU readline is
|
||||
# now incorporated in an optional module, configured in the Setup file
|
||||
# instead of by a configure script switch. You may have to insert a
|
||||
# -L option pointing to the directory where libreadline.* lives,
|
||||
# and you may have to change -ltermcap to -ltermlib or perhaps remove
|
||||
# it, depending on your system -- see the GNU readline instructions.
|
||||
# It's okay for this to be a shared library, too.
|
||||
|
||||
#readline readline.c -lreadline -ltermcap
|
||||
|
||||
|
||||
# Modules that should always be present (non UNIX dependent):
|
||||
|
||||
#array arraymodule.c # array objects
|
||||
#cmath cmathmodule.c _math.c # -lm # complex math library functions
|
||||
#math mathmodule.c _math.c # -lm # math library functions, e.g. sin()
|
||||
#_struct _struct.c # binary structure packing/unpacking
|
||||
#time timemodule.c # -lm # time operations and variables
|
||||
#operator operator.c # operator.add() and similar goodies
|
||||
#_testcapi _testcapimodule.c # Python C API test module
|
||||
#_random _randommodule.c # Random number generator
|
||||
#_collections _collectionsmodule.c # Container types
|
||||
#_heapq _heapqmodule.c # Heapq type
|
||||
#itertools itertoolsmodule.c # Functions creating iterators for efficient looping
|
||||
#strop stropmodule.c # String manipulations
|
||||
#_functools _functoolsmodule.c # Tools for working with functions and callable objects
|
||||
#_elementtree -I$(srcdir)/Modules/expat -DHAVE_EXPAT_CONFIG_H -DUSE_PYEXPAT_CAPI _elementtree.c # elementtree accelerator
|
||||
#_pickle _pickle.c # pickle accelerator
|
||||
#datetime datetimemodule.c # date/time type
|
||||
#_bisect _bisectmodule.c # Bisection algorithms
|
||||
|
||||
#unicodedata unicodedata.c # static Unicode character database
|
||||
|
||||
# access to ISO C locale support
|
||||
#_locale _localemodule.c # -lintl
|
||||
|
||||
# Standard I/O baseline
|
||||
#_io -I$(srcdir)/Modules/_io _io/bufferedio.c _io/bytesio.c _io/fileio.c _io/iobase.c _io/_iomodule.c _io/stringio.c _io/textio.c
|
||||
|
||||
|
||||
# Modules with some UNIX dependencies -- on by default:
|
||||
# (If you have a really backward UNIX, select and socket may not be
|
||||
# supported...)
|
||||
|
||||
#fcntl fcntlmodule.c # fcntl(2) and ioctl(2)
|
||||
#spwd spwdmodule.c # spwd(3)
|
||||
#grp grpmodule.c # grp(3)
|
||||
#select selectmodule.c # select(2); not on ancient System V
|
||||
|
||||
# Memory-mapped files (also works on Win32).
|
||||
#mmap mmapmodule.c
|
||||
|
||||
# CSV file helper
|
||||
#_csv _csv.c
|
||||
|
||||
# Socket module helper for socket(2)
|
||||
#_socket socketmodule.c timemodule.c
|
||||
|
||||
# Socket module helper for SSL support; you must comment out the other
|
||||
# socket line above, and possibly edit the SSL variable:
|
||||
#SSL=/usr/local/ssl
|
||||
#_ssl _ssl.c \
|
||||
# -DUSE_SSL -I$(SSL)/include -I$(SSL)/include/openssl \
|
||||
# -L$(SSL)/lib -lssl -lcrypto
|
||||
|
||||
# The crypt module is now disabled by default because it breaks builds
|
||||
# on many systems (where -lcrypt is needed), e.g. Linux (I believe).
|
||||
#
|
||||
# First, look at Setup.config; configure may have set this for you.
|
||||
|
||||
#crypt cryptmodule.c # -lcrypt # crypt(3); needs -lcrypt on some systems
|
||||
|
||||
|
||||
# Some more UNIX dependent modules -- off by default, since these
|
||||
# are not supported by all UNIX systems:
|
||||
|
||||
#nis nismodule.c -lnsl # Sun yellow pages -- not everywhere
|
||||
#termios termios.c # Steen Lumholt's termios module
|
||||
#resource resource.c # Jeremy Hylton's rlimit interface
|
||||
|
||||
|
||||
# Multimedia modules -- off by default.
|
||||
# These don't work for 64-bit platforms!!!
|
||||
# #993173 says audioop works on 64-bit platforms, though.
|
||||
# These represent audio samples or images as strings:
|
||||
|
||||
#audioop audioop.c # Operations on audio samples
|
||||
#imageop imageop.c # Operations on images
|
||||
|
||||
|
||||
# Note that the _md5 and _sha modules are normally only built if the
|
||||
# system does not have the OpenSSL libs containing an optimized version.
|
||||
|
||||
# The _md5 module implements the RSA Data Security, Inc. MD5
|
||||
# Message-Digest Algorithm, described in RFC 1321. The necessary files
|
||||
# md5.c and md5.h are included here.
|
||||
|
||||
#_md5 md5module.c md5.c
|
||||
|
||||
|
||||
# The _sha module implements the SHA checksum algorithms.
|
||||
# (NIST's Secure Hash Algorithms.)
|
||||
#_sha shamodule.c
|
||||
#_sha256 sha256module.c
|
||||
#_sha512 sha512module.c
|
||||
|
||||
|
||||
# SGI IRIX specific modules -- off by default.
|
||||
|
||||
# These module work on any SGI machine:
|
||||
|
||||
# *** gl must be enabled higher up in this file ***
|
||||
#fm fmmodule.c $(GLHACK) -lfm -lgl # Font Manager
|
||||
#sgi sgimodule.c # sgi.nap() and a few more
|
||||
|
||||
# This module requires the header file
|
||||
# /usr/people/4Dgifts/iristools/include/izoom.h:
|
||||
#imgfile imgfile.c -limage -lgutil -lgl -lm # Image Processing Utilities
|
||||
|
||||
|
||||
# These modules require the Multimedia Development Option (I think):
|
||||
|
||||
#al almodule.c -laudio # Audio Library
|
||||
#cd cdmodule.c -lcdaudio -lds -lmediad # CD Audio Library
|
||||
#cl clmodule.c -lcl -lawareaudio # Compression Library
|
||||
#sv svmodule.c yuvconvert.c -lsvideo -lXext -lX11 # Starter Video
|
||||
|
||||
|
||||
# The FORMS library, by Mark Overmars, implements user interface
|
||||
# components such as dialogs and buttons using SGI's GL and FM
|
||||
# libraries. You must ftp the FORMS library separately from
|
||||
# ftp://ftp.cs.ruu.nl/pub/SGI/FORMS. It was tested with FORMS 2.2a.
|
||||
# NOTE: if you want to be able to use FORMS and curses simultaneously
|
||||
# (or both link them statically into the same binary), you must
|
||||
# compile all of FORMS with the cc option "-Dclear=__GLclear".
|
||||
|
||||
# The FORMS variable must point to the FORMS subdirectory of the forms
|
||||
# toplevel directory:
|
||||
|
||||
#FORMS=/ufs/guido/src/forms/FORMS
|
||||
#fl flmodule.c -I$(FORMS) $(GLHACK) $(FORMS)/libforms.a -lfm -lgl
|
||||
|
||||
|
||||
# SunOS specific modules -- off by default:
|
||||
|
||||
#sunaudiodev sunaudiodev.c
|
||||
|
||||
|
||||
# A Linux specific module -- off by default; this may also work on
|
||||
# some *BSDs.
|
||||
|
||||
#linuxaudiodev linuxaudiodev.c
|
||||
|
||||
|
||||
# George Neville-Neil's timing module:
|
||||
|
||||
#timing timingmodule.c
|
||||
|
||||
|
||||
# The _tkinter module.
|
||||
#
|
||||
# The command for _tkinter is long and site specific. Please
|
||||
# uncomment and/or edit those parts as indicated. If you don't have a
|
||||
# specific extension (e.g. Tix or BLT), leave the corresponding line
|
||||
# commented out. (Leave the trailing backslashes in! If you
|
||||
# experience strange errors, you may want to join all uncommented
|
||||
# lines and remove the backslashes -- the backslash interpretation is
|
||||
# done by the shell's "read" command and it may not be implemented on
|
||||
# every system.
|
||||
|
||||
# *** Always uncomment this (leave the leading underscore in!):
|
||||
# _tkinter _tkinter.c tkappinit.c -DWITH_APPINIT \
|
||||
# *** Uncomment and edit to reflect where your Tcl/Tk libraries are:
|
||||
# -L/usr/local/lib \
|
||||
# *** Uncomment and edit to reflect where your Tcl/Tk headers are:
|
||||
# -I/usr/local/include \
|
||||
# *** Uncomment and edit to reflect where your X11 header files are:
|
||||
# -I/usr/X11R6/include \
|
||||
# *** Or uncomment this for Solaris:
|
||||
# -I/usr/openwin/include \
|
||||
# *** Uncomment and edit for Tix extension only:
|
||||
# -DWITH_TIX -ltix8.1.8.2 \
|
||||
# *** Uncomment and edit for BLT extension only:
|
||||
# -DWITH_BLT -I/usr/local/blt/blt8.0-unoff/include -lBLT8.0 \
|
||||
# *** Uncomment and edit for PIL (TkImaging) extension only:
|
||||
# (See http://www.pythonware.com/products/pil/ for more info)
|
||||
# -DWITH_PIL -I../Extensions/Imaging/libImaging tkImaging.c \
|
||||
# *** Uncomment and edit for TOGL extension only:
|
||||
# -DWITH_TOGL togl.c \
|
||||
# *** Uncomment and edit to reflect your Tcl/Tk versions:
|
||||
# -ltk8.2 -ltcl8.2 \
|
||||
# *** Uncomment and edit to reflect where your X11 libraries are:
|
||||
# -L/usr/X11R6/lib \
|
||||
# *** Or uncomment this for Solaris:
|
||||
# -L/usr/openwin/lib \
|
||||
# *** Uncomment these for TOGL extension only:
|
||||
# -lGL -lGLU -lXext -lXmu \
|
||||
# *** Uncomment for AIX:
|
||||
# -lld \
|
||||
# *** Always uncomment this; X11 libraries to link with:
|
||||
# -lX11
|
||||
|
||||
# Lance Ellinghaus's syslog module
|
||||
#syslog syslogmodule.c # syslog daemon interface
|
||||
|
||||
|
||||
# Curses support, requring the System V version of curses, often
|
||||
# provided by the ncurses library. e.g. on Linux, link with -lncurses
|
||||
# instead of -lcurses).
|
||||
#
|
||||
# First, look at Setup.config; configure may have set this for you.
|
||||
|
||||
#_curses _cursesmodule.c -lcurses -ltermcap
|
||||
# Wrapper for the panel library that's part of ncurses and SYSV curses.
|
||||
#_curses_panel _curses_panel.c -lpanel -lncurses
|
||||
|
||||
|
||||
# Generic (SunOS / SVR4) dynamic loading module.
|
||||
# This is not needed for dynamic loading of Python modules --
|
||||
# it is a highly experimental and dangerous device for calling
|
||||
# *arbitrary* C functions in *arbitrary* shared libraries:
|
||||
|
||||
#dl dlmodule.c
|
||||
|
||||
|
||||
# Modules that provide persistent dictionary-like semantics. You will
|
||||
# probably want to arrange for at least one of them to be available on
|
||||
# your machine, though none are defined by default because of library
|
||||
# dependencies. The Python module anydbm.py provides an
|
||||
# implementation independent wrapper for these; dumbdbm.py provides
|
||||
# similar functionality (but slower of course) implemented in Python.
|
||||
|
||||
# The standard Unix dbm module has been moved to Setup.config so that
|
||||
# it will be compiled as a shared library by default. Compiling it as
|
||||
# a built-in module causes conflicts with the pybsddb3 module since it
|
||||
# creates a static dependency on an out-of-date version of db.so.
|
||||
#
|
||||
# First, look at Setup.config; configure may have set this for you.
|
||||
|
||||
#dbm dbmmodule.c # dbm(3) may require -lndbm or similar
|
||||
|
||||
# Anthony Baxter's gdbm module. GNU dbm(3) will require -lgdbm:
|
||||
#
|
||||
# First, look at Setup.config; configure may have set this for you.
|
||||
|
||||
#gdbm gdbmmodule.c -I/usr/local/include -L/usr/local/lib -lgdbm
|
||||
|
||||
|
||||
# Sleepycat Berkeley DB interface.
|
||||
#
|
||||
# This requires the Sleepycat DB code, see http://www.sleepycat.com/
|
||||
# The earliest supported version of that library is 3.0, the latest
|
||||
# supported version is 4.0 (4.1 is specifically not supported, as that
|
||||
# changes the semantics of transactional databases). A list of available
|
||||
# releases can be found at
|
||||
#
|
||||
# http://www.sleepycat.com/update/index.html
|
||||
#
|
||||
# Edit the variables DB and DBLIBVERto point to the db top directory
|
||||
# and the subdirectory of PORT where you built it.
|
||||
#DB=/usr/local/BerkeleyDB.4.0
|
||||
#DBLIBVER=4.0
|
||||
#DBINC=$(DB)/include
|
||||
#DBLIB=$(DB)/lib
|
||||
#_bsddb _bsddb.c -I$(DBINC) -L$(DBLIB) -ldb-$(DBLIBVER)
|
||||
|
||||
# Historical Berkeley DB 1.85
|
||||
#
|
||||
# This module is deprecated; the 1.85 version of the Berkeley DB library has
|
||||
# bugs that can cause data corruption. If you can, use later versions of the
|
||||
# library instead, available from <http://www.sleepycat.com/>.
|
||||
|
||||
#DB=/depot/sundry/src/berkeley-db/db.1.85
|
||||
#DBPORT=$(DB)/PORT/irix.5.3
|
||||
#bsddb185 bsddbmodule.c -I$(DBPORT)/include -I$(DBPORT) $(DBPORT)/libdb.a
|
||||
|
||||
|
||||
|
||||
# Helper module for various ascii-encoders
|
||||
#binascii binascii.c
|
||||
|
||||
# Fred Drake's interface to the Python parser
|
||||
#parser parsermodule.c
|
||||
|
||||
# cStringIO and cPickle
|
||||
#cStringIO cStringIO.c
|
||||
#cPickle cPickle.c
|
||||
|
||||
|
||||
# Lee Busby's SIGFPE modules.
|
||||
# The library to link fpectl with is platform specific.
|
||||
# Choose *one* of the options below for fpectl:
|
||||
|
||||
# For SGI IRIX (tested on 5.3):
|
||||
#fpectl fpectlmodule.c -lfpe
|
||||
|
||||
# For Solaris with SunPro compiler (tested on Solaris 2.5 with SunPro C 4.2):
|
||||
# (Without the compiler you don't have -lsunmath.)
|
||||
#fpectl fpectlmodule.c -R/opt/SUNWspro/lib -lsunmath -lm
|
||||
|
||||
# For other systems: see instructions in fpectlmodule.c.
|
||||
#fpectl fpectlmodule.c ...
|
||||
|
||||
# Test module for fpectl. No extra libraries needed.
|
||||
#fpetest fpetestmodule.c
|
||||
|
||||
# Andrew Kuchling's zlib module.
|
||||
# This require zlib 1.1.3 (or later).
|
||||
# See http://www.gzip.org/zlib/
|
||||
#zlib zlibmodule.c -I$(prefix)/include -L$(exec_prefix)/lib -lz
|
||||
|
||||
# Interface to the Expat XML parser
|
||||
#
|
||||
# Expat was written by James Clark and is now maintained by a group of
|
||||
# developers on SourceForge; see www.libexpat.org for more
|
||||
# information. The pyexpat module was written by Paul Prescod after a
|
||||
# prototype by Jack Jansen. Source of Expat 1.95.2 is included in
|
||||
# Modules/expat/. Usage of a system shared libexpat.so/expat.dll is
|
||||
# not advised.
|
||||
#
|
||||
# More information on Expat can be found at www.libexpat.org.
|
||||
#
|
||||
#pyexpat expat/xmlparse.c expat/xmlrole.c expat/xmltok.c pyexpat.c -I$(srcdir)/Modules/expat -DHAVE_EXPAT_CONFIG_H -DUSE_PYEXPAT_CAPI
|
||||
|
||||
|
||||
# Hye-Shik Chang's CJKCodecs
|
||||
|
||||
# multibytecodec is required for all the other CJK codec modules
|
||||
#_multibytecodec cjkcodecs/multibytecodec.c
|
||||
|
||||
#_codecs_cn cjkcodecs/_codecs_cn.c
|
||||
#_codecs_hk cjkcodecs/_codecs_hk.c
|
||||
#_codecs_iso2022 cjkcodecs/_codecs_iso2022.c
|
||||
#_codecs_jp cjkcodecs/_codecs_jp.c
|
||||
#_codecs_kr cjkcodecs/_codecs_kr.c
|
||||
#_codecs_tw cjkcodecs/_codecs_tw.c
|
||||
|
||||
# Example -- included for reference only:
|
||||
# xx xxmodule.c
|
||||
|
||||
# Another example -- the 'xxsubtype' module shows C-level subtyping in action
|
||||
xxsubtype xxsubtype.c
|
|
@ -0,0 +1,13 @@
|
|||
# This file is transmogrified into Setup.config by config.status.
|
||||
|
||||
# The purpose of this file is to conditionally enable certain modules
|
||||
# based on configure-time options.
|
||||
|
||||
# Threading
|
||||
thread threadmodule.c
|
||||
|
||||
# The signal module
|
||||
signal signalmodule.c
|
||||
|
||||
# The rest of the modules previously listed in this file are built
|
||||
# by the setup.py script in Python 2.1 and later.
|
|
@ -0,0 +1 @@
|
|||
# Edit this file for local setup changes
|
|
@ -0,0 +1,123 @@
|
|||
# Copyright (C) 2001-2006 Python Software Foundation
|
||||
# Author: Barry Warsaw
|
||||
# Contact: email-sig@python.org
|
||||
|
||||
"""A package for parsing, handling, and generating email messages."""
|
||||
|
||||
__version__ = '4.0.3'
|
||||
|
||||
__all__ = [
|
||||
# Old names
|
||||
'base64MIME',
|
||||
'Charset',
|
||||
'Encoders',
|
||||
'Errors',
|
||||
'Generator',
|
||||
'Header',
|
||||
'Iterators',
|
||||
'Message',
|
||||
'MIMEAudio',
|
||||
'MIMEBase',
|
||||
'MIMEImage',
|
||||
'MIMEMessage',
|
||||
'MIMEMultipart',
|
||||
'MIMENonMultipart',
|
||||
'MIMEText',
|
||||
'Parser',
|
||||
'quopriMIME',
|
||||
'Utils',
|
||||
'message_from_string',
|
||||
'message_from_file',
|
||||
# new names
|
||||
'base64mime',
|
||||
'charset',
|
||||
'encoders',
|
||||
'errors',
|
||||
'generator',
|
||||
'header',
|
||||
'iterators',
|
||||
'message',
|
||||
'mime',
|
||||
'parser',
|
||||
'quoprimime',
|
||||
'utils',
|
||||
]
|
||||
|
||||
|
||||
|
||||
# Some convenience routines. Don't import Parser and Message as side-effects
|
||||
# of importing email since those cascadingly import most of the rest of the
|
||||
# email package.
|
||||
def message_from_string(s, *args, **kws):
|
||||
"""Parse a string into a Message object model.
|
||||
|
||||
Optional _class and strict are passed to the Parser constructor.
|
||||
"""
|
||||
from email.parser import Parser
|
||||
return Parser(*args, **kws).parsestr(s)
|
||||
|
||||
|
||||
def message_from_file(fp, *args, **kws):
|
||||
"""Read a file and parse its contents into a Message object model.
|
||||
|
||||
Optional _class and strict are passed to the Parser constructor.
|
||||
"""
|
||||
from email.parser import Parser
|
||||
return Parser(*args, **kws).parse(fp)
|
||||
|
||||
|
||||
|
||||
# Lazy loading to provide name mapping from new-style names (PEP 8 compatible
|
||||
# email 4.0 module names), to old-style names (email 3.0 module names).
|
||||
import sys
|
||||
|
||||
class LazyImporter(object):
|
||||
def __init__(self, module_name):
|
||||
self.__name__ = 'email.' + module_name
|
||||
|
||||
def __getattr__(self, name):
|
||||
__import__(self.__name__)
|
||||
mod = sys.modules[self.__name__]
|
||||
self.__dict__.update(mod.__dict__)
|
||||
return getattr(mod, name)
|
||||
|
||||
|
||||
_LOWERNAMES = [
|
||||
# email.<old name> -> email.<new name is lowercased old name>
|
||||
'Charset',
|
||||
'Encoders',
|
||||
'Errors',
|
||||
'FeedParser',
|
||||
'Generator',
|
||||
'Header',
|
||||
'Iterators',
|
||||
'Message',
|
||||
'Parser',
|
||||
'Utils',
|
||||
'base64MIME',
|
||||
'quopriMIME',
|
||||
]
|
||||
|
||||
_MIMENAMES = [
|
||||
# email.MIME<old name> -> email.mime.<new name is lowercased old name>
|
||||
'Audio',
|
||||
'Base',
|
||||
'Image',
|
||||
'Message',
|
||||
'Multipart',
|
||||
'NonMultipart',
|
||||
'Text',
|
||||
]
|
||||
|
||||
for _name in _LOWERNAMES:
|
||||
importer = LazyImporter(_name.lower())
|
||||
sys.modules['email.' + _name] = importer
|
||||
setattr(sys.modules['email'], _name, importer)
|
||||
|
||||
|
||||
import email.mime
|
||||
for _name in _MIMENAMES:
|
||||
importer = LazyImporter('mime.' + _name.lower())
|
||||
sys.modules['email.MIME' + _name] = importer
|
||||
setattr(sys.modules['email'], 'MIME' + _name, importer)
|
||||
setattr(sys.modules['email.mime'], _name, importer)
|
Binary file not shown.
Binary file not shown.
|
@ -0,0 +1,497 @@
|
|||
# Copyright (C) 2002-2007 Python Software Foundation
|
||||
# Contact: email-sig@python.org
|
||||
|
||||
"""Email address parsing code.
|
||||
|
||||
Lifted directly from rfc822.py. This should eventually be rewritten.
|
||||
"""
|
||||
|
||||
__all__ = [
|
||||
'mktime_tz',
|
||||
'parsedate',
|
||||
'parsedate_tz',
|
||||
'quote',
|
||||
]
|
||||
|
||||
import time, calendar
|
||||
|
||||
SPACE = ' '
|
||||
EMPTYSTRING = ''
|
||||
COMMASPACE = ', '
|
||||
|
||||
# Parse a date field
|
||||
_monthnames = ['jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul',
|
||||
'aug', 'sep', 'oct', 'nov', 'dec',
|
||||
'january', 'february', 'march', 'april', 'may', 'june', 'july',
|
||||
'august', 'september', 'october', 'november', 'december']
|
||||
|
||||
_daynames = ['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun']
|
||||
|
||||
# The timezone table does not include the military time zones defined
|
||||
# in RFC822, other than Z. According to RFC1123, the description in
|
||||
# RFC822 gets the signs wrong, so we can't rely on any such time
|
||||
# zones. RFC1123 recommends that numeric timezone indicators be used
|
||||
# instead of timezone names.
|
||||
|
||||
_timezones = {'UT':0, 'UTC':0, 'GMT':0, 'Z':0,
|
||||
'AST': -400, 'ADT': -300, # Atlantic (used in Canada)
|
||||
'EST': -500, 'EDT': -400, # Eastern
|
||||
'CST': -600, 'CDT': -500, # Central
|
||||
'MST': -700, 'MDT': -600, # Mountain
|
||||
'PST': -800, 'PDT': -700 # Pacific
|
||||
}
|
||||
|
||||
|
||||
def parsedate_tz(data):
|
||||
"""Convert a date string to a time tuple.
|
||||
|
||||
Accounts for military timezones.
|
||||
"""
|
||||
data = data.split()
|
||||
# The FWS after the comma after the day-of-week is optional, so search and
|
||||
# adjust for this.
|
||||
if data[0].endswith(',') or data[0].lower() in _daynames:
|
||||
# There's a dayname here. Skip it
|
||||
del data[0]
|
||||
else:
|
||||
i = data[0].rfind(',')
|
||||
if i >= 0:
|
||||
data[0] = data[0][i+1:]
|
||||
if len(data) == 3: # RFC 850 date, deprecated
|
||||
stuff = data[0].split('-')
|
||||
if len(stuff) == 3:
|
||||
data = stuff + data[1:]
|
||||
if len(data) == 4:
|
||||
s = data[3]
|
||||
i = s.find('+')
|
||||
if i > 0:
|
||||
data[3:] = [s[:i], s[i+1:]]
|
||||
else:
|
||||
data.append('') # Dummy tz
|
||||
if len(data) < 5:
|
||||
return None
|
||||
data = data[:5]
|
||||
[dd, mm, yy, tm, tz] = data
|
||||
mm = mm.lower()
|
||||
if mm not in _monthnames:
|
||||
dd, mm = mm, dd.lower()
|
||||
if mm not in _monthnames:
|
||||
return None
|
||||
mm = _monthnames.index(mm) + 1
|
||||
if mm > 12:
|
||||
mm -= 12
|
||||
if dd[-1] == ',':
|
||||
dd = dd[:-1]
|
||||
i = yy.find(':')
|
||||
if i > 0:
|
||||
yy, tm = tm, yy
|
||||
if yy[-1] == ',':
|
||||
yy = yy[:-1]
|
||||
if not yy[0].isdigit():
|
||||
yy, tz = tz, yy
|
||||
if tm[-1] == ',':
|
||||
tm = tm[:-1]
|
||||
tm = tm.split(':')
|
||||
if len(tm) == 2:
|
||||
[thh, tmm] = tm
|
||||
tss = '0'
|
||||
elif len(tm) == 3:
|
||||
[thh, tmm, tss] = tm
|
||||
else:
|
||||
return None
|
||||
try:
|
||||
yy = int(yy)
|
||||
dd = int(dd)
|
||||
thh = int(thh)
|
||||
tmm = int(tmm)
|
||||
tss = int(tss)
|
||||
except ValueError:
|
||||
return None
|
||||
# Check for a yy specified in two-digit format, then convert it to the
|
||||
# appropriate four-digit format, according to the POSIX standard. RFC 822
|
||||
# calls for a two-digit yy, but RFC 2822 (which obsoletes RFC 822)
|
||||
# mandates a 4-digit yy. For more information, see the documentation for
|
||||
# the time module.
|
||||
if yy < 100:
|
||||
# The year is between 1969 and 1999 (inclusive).
|
||||
if yy > 68:
|
||||
yy += 1900
|
||||
# The year is between 2000 and 2068 (inclusive).
|
||||
else:
|
||||
yy += 2000
|
||||
tzoffset = None
|
||||
tz = tz.upper()
|
||||
if tz in _timezones:
|
||||
tzoffset = _timezones[tz]
|
||||
else:
|
||||
try:
|
||||
tzoffset = int(tz)
|
||||
except ValueError:
|
||||
pass
|
||||
# Convert a timezone offset into seconds ; -0500 -> -18000
|
||||
if tzoffset:
|
||||
if tzoffset < 0:
|
||||
tzsign = -1
|
||||
tzoffset = -tzoffset
|
||||
else:
|
||||
tzsign = 1
|
||||
tzoffset = tzsign * ( (tzoffset//100)*3600 + (tzoffset % 100)*60)
|
||||
# Daylight Saving Time flag is set to -1, since DST is unknown.
|
||||
return yy, mm, dd, thh, tmm, tss, 0, 1, -1, tzoffset
|
||||
|
||||
|
||||
def parsedate(data):
|
||||
"""Convert a time string to a time tuple."""
|
||||
t = parsedate_tz(data)
|
||||
if isinstance(t, tuple):
|
||||
return t[:9]
|
||||
else:
|
||||
return t
|
||||
|
||||
|
||||
def mktime_tz(data):
|
||||
"""Turn a 10-tuple as returned by parsedate_tz() into a POSIX timestamp."""
|
||||
if data[9] is None:
|
||||
# No zone info, so localtime is better assumption than GMT
|
||||
return time.mktime(data[:8] + (-1,))
|
||||
else:
|
||||
t = calendar.timegm(data)
|
||||
return t - data[9]
|
||||
|
||||
|
||||
def quote(str):
|
||||
"""Prepare string to be used in a quoted string.
|
||||
|
||||
Turns backslash and double quote characters into quoted pairs. These
|
||||
are the only characters that need to be quoted inside a quoted string.
|
||||
Does not add the surrounding double quotes.
|
||||
"""
|
||||
return str.replace('\\', '\\\\').replace('"', '\\"')
|
||||
|
||||
|
||||
class AddrlistClass:
|
||||
"""Address parser class by Ben Escoto.
|
||||
|
||||
To understand what this class does, it helps to have a copy of RFC 2822 in
|
||||
front of you.
|
||||
|
||||
Note: this class interface is deprecated and may be removed in the future.
|
||||
Use rfc822.AddressList instead.
|
||||
"""
|
||||
|
||||
def __init__(self, field):
|
||||
"""Initialize a new instance.
|
||||
|
||||
`field' is an unparsed address header field, containing
|
||||
one or more addresses.
|
||||
"""
|
||||
self.specials = '()<>@,:;.\"[]'
|
||||
self.pos = 0
|
||||
self.LWS = ' \t'
|
||||
self.CR = '\r\n'
|
||||
self.FWS = self.LWS + self.CR
|
||||
self.atomends = self.specials + self.LWS + self.CR
|
||||
# Note that RFC 2822 now specifies `.' as obs-phrase, meaning that it
|
||||
# is obsolete syntax. RFC 2822 requires that we recognize obsolete
|
||||
# syntax, so allow dots in phrases.
|
||||
self.phraseends = self.atomends.replace('.', '')
|
||||
self.field = field
|
||||
self.commentlist = []
|
||||
|
||||
def gotonext(self):
|
||||
"""Parse up to the start of the next address."""
|
||||
while self.pos < len(self.field):
|
||||
if self.field[self.pos] in self.LWS + '\n\r':
|
||||
self.pos += 1
|
||||
elif self.field[self.pos] == '(':
|
||||
self.commentlist.append(self.getcomment())
|
||||
else:
|
||||
break
|
||||
|
||||
def getaddrlist(self):
|
||||
"""Parse all addresses.
|
||||
|
||||
Returns a list containing all of the addresses.
|
||||
"""
|
||||
result = []
|
||||
while self.pos < len(self.field):
|
||||
ad = self.getaddress()
|
||||
if ad:
|
||||
result += ad
|
||||
else:
|
||||
result.append(('', ''))
|
||||
return result
|
||||
|
||||
def getaddress(self):
|
||||
"""Parse the next address."""
|
||||
self.commentlist = []
|
||||
self.gotonext()
|
||||
|
||||
oldpos = self.pos
|
||||
oldcl = self.commentlist
|
||||
plist = self.getphraselist()
|
||||
|
||||
self.gotonext()
|
||||
returnlist = []
|
||||
|
||||
if self.pos >= len(self.field):
|
||||
# Bad email address technically, no domain.
|
||||
if plist:
|
||||
returnlist = [(SPACE.join(self.commentlist), plist[0])]
|
||||
|
||||
elif self.field[self.pos] in '.@':
|
||||
# email address is just an addrspec
|
||||
# this isn't very efficient since we start over
|
||||
self.pos = oldpos
|
||||
self.commentlist = oldcl
|
||||
addrspec = self.getaddrspec()
|
||||
returnlist = [(SPACE.join(self.commentlist), addrspec)]
|
||||
|
||||
elif self.field[self.pos] == ':':
|
||||
# address is a group
|
||||
returnlist = []
|
||||
|
||||
fieldlen = len(self.field)
|
||||
self.pos += 1
|
||||
while self.pos < len(self.field):
|
||||
self.gotonext()
|
||||
if self.pos < fieldlen and self.field[self.pos] == ';':
|
||||
self.pos += 1
|
||||
break
|
||||
returnlist = returnlist + self.getaddress()
|
||||
|
||||
elif self.field[self.pos] == '<':
|
||||
# Address is a phrase then a route addr
|
||||
routeaddr = self.getrouteaddr()
|
||||
|
||||
if self.commentlist:
|
||||
returnlist = [(SPACE.join(plist) + ' (' +
|
||||
' '.join(self.commentlist) + ')', routeaddr)]
|
||||
else:
|
||||
returnlist = [(SPACE.join(plist), routeaddr)]
|
||||
|
||||
else:
|
||||
if plist:
|
||||
returnlist = [(SPACE.join(self.commentlist), plist[0])]
|
||||
elif self.field[self.pos] in self.specials:
|
||||
self.pos += 1
|
||||
|
||||
self.gotonext()
|
||||
if self.pos < len(self.field) and self.field[self.pos] == ',':
|
||||
self.pos += 1
|
||||
return returnlist
|
||||
|
||||
def getrouteaddr(self):
|
||||
"""Parse a route address (Return-path value).
|
||||
|
||||
This method just skips all the route stuff and returns the addrspec.
|
||||
"""
|
||||
if self.field[self.pos] != '<':
|
||||
return
|
||||
|
||||
expectroute = False
|
||||
self.pos += 1
|
||||
self.gotonext()
|
||||
adlist = ''
|
||||
while self.pos < len(self.field):
|
||||
if expectroute:
|
||||
self.getdomain()
|
||||
expectroute = False
|
||||
elif self.field[self.pos] == '>':
|
||||
self.pos += 1
|
||||
break
|
||||
elif self.field[self.pos] == '@':
|
||||
self.pos += 1
|
||||
expectroute = True
|
||||
elif self.field[self.pos] == ':':
|
||||
self.pos += 1
|
||||
else:
|
||||
adlist = self.getaddrspec()
|
||||
self.pos += 1
|
||||
break
|
||||
self.gotonext()
|
||||
|
||||
return adlist
|
||||
|
||||
def getaddrspec(self):
|
||||
"""Parse an RFC 2822 addr-spec."""
|
||||
aslist = []
|
||||
|
||||
self.gotonext()
|
||||
while self.pos < len(self.field):
|
||||
if self.field[self.pos] == '.':
|
||||
aslist.append('.')
|
||||
self.pos += 1
|
||||
elif self.field[self.pos] == '"':
|
||||
aslist.append('"%s"' % quote(self.getquote()))
|
||||
elif self.field[self.pos] in self.atomends:
|
||||
break
|
||||
else:
|
||||
aslist.append(self.getatom())
|
||||
self.gotonext()
|
||||
|
||||
if self.pos >= len(self.field) or self.field[self.pos] != '@':
|
||||
return EMPTYSTRING.join(aslist)
|
||||
|
||||
aslist.append('@')
|
||||
self.pos += 1
|
||||
self.gotonext()
|
||||
return EMPTYSTRING.join(aslist) + self.getdomain()
|
||||
|
||||
def getdomain(self):
|
||||
"""Get the complete domain name from an address."""
|
||||
sdlist = []
|
||||
while self.pos < len(self.field):
|
||||
if self.field[self.pos] in self.LWS:
|
||||
self.pos += 1
|
||||
elif self.field[self.pos] == '(':
|
||||
self.commentlist.append(self.getcomment())
|
||||
elif self.field[self.pos] == '[':
|
||||
sdlist.append(self.getdomainliteral())
|
||||
elif self.field[self.pos] == '.':
|
||||
self.pos += 1
|
||||
sdlist.append('.')
|
||||
elif self.field[self.pos] in self.atomends:
|
||||
break
|
||||
else:
|
||||
sdlist.append(self.getatom())
|
||||
return EMPTYSTRING.join(sdlist)
|
||||
|
||||
def getdelimited(self, beginchar, endchars, allowcomments=True):
|
||||
"""Parse a header fragment delimited by special characters.
|
||||
|
||||
`beginchar' is the start character for the fragment.
|
||||
If self is not looking at an instance of `beginchar' then
|
||||
getdelimited returns the empty string.
|
||||
|
||||
`endchars' is a sequence of allowable end-delimiting characters.
|
||||
Parsing stops when one of these is encountered.
|
||||
|
||||
If `allowcomments' is non-zero, embedded RFC 2822 comments are allowed
|
||||
within the parsed fragment.
|
||||
"""
|
||||
if self.field[self.pos] != beginchar:
|
||||
return ''
|
||||
|
||||
slist = ['']
|
||||
quote = False
|
||||
self.pos += 1
|
||||
while self.pos < len(self.field):
|
||||
if quote:
|
||||
slist.append(self.field[self.pos])
|
||||
quote = False
|
||||
elif self.field[self.pos] in endchars:
|
||||
self.pos += 1
|
||||
break
|
||||
elif allowcomments and self.field[self.pos] == '(':
|
||||
slist.append(self.getcomment())
|
||||
continue # have already advanced pos from getcomment
|
||||
elif self.field[self.pos] == '\\':
|
||||
quote = True
|
||||
else:
|
||||
slist.append(self.field[self.pos])
|
||||
self.pos += 1
|
||||
|
||||
return EMPTYSTRING.join(slist)
|
||||
|
||||
def getquote(self):
|
||||
"""Get a quote-delimited fragment from self's field."""
|
||||
return self.getdelimited('"', '"\r', False)
|
||||
|
||||
def getcomment(self):
|
||||
"""Get a parenthesis-delimited fragment from self's field."""
|
||||
return self.getdelimited('(', ')\r', True)
|
||||
|
||||
def getdomainliteral(self):
|
||||
"""Parse an RFC 2822 domain-literal."""
|
||||
return '[%s]' % self.getdelimited('[', ']\r', False)
|
||||
|
||||
def getatom(self, atomends=None):
|
||||
"""Parse an RFC 2822 atom.
|
||||
|
||||
Optional atomends specifies a different set of end token delimiters
|
||||
(the default is to use self.atomends). This is used e.g. in
|
||||
getphraselist() since phrase endings must not include the `.' (which
|
||||
is legal in phrases)."""
|
||||
atomlist = ['']
|
||||
if atomends is None:
|
||||
atomends = self.atomends
|
||||
|
||||
while self.pos < len(self.field):
|
||||
if self.field[self.pos] in atomends:
|
||||
break
|
||||
else:
|
||||
atomlist.append(self.field[self.pos])
|
||||
self.pos += 1
|
||||
|
||||
return EMPTYSTRING.join(atomlist)
|
||||
|
||||
def getphraselist(self):
|
||||
"""Parse a sequence of RFC 2822 phrases.
|
||||
|
||||
A phrase is a sequence of words, which are in turn either RFC 2822
|
||||
atoms or quoted-strings. Phrases are canonicalized by squeezing all
|
||||
runs of continuous whitespace into one space.
|
||||
"""
|
||||
plist = []
|
||||
|
||||
while self.pos < len(self.field):
|
||||
if self.field[self.pos] in self.FWS:
|
||||
self.pos += 1
|
||||
elif self.field[self.pos] == '"':
|
||||
plist.append(self.getquote())
|
||||
elif self.field[self.pos] == '(':
|
||||
self.commentlist.append(self.getcomment())
|
||||
elif self.field[self.pos] in self.phraseends:
|
||||
break
|
||||
else:
|
||||
plist.append(self.getatom(self.phraseends))
|
||||
|
||||
return plist
|
||||
|
||||
class AddressList(AddrlistClass):
|
||||
"""An AddressList encapsulates a list of parsed RFC 2822 addresses."""
|
||||
def __init__(self, field):
|
||||
AddrlistClass.__init__(self, field)
|
||||
if field:
|
||||
self.addresslist = self.getaddrlist()
|
||||
else:
|
||||
self.addresslist = []
|
||||
|
||||
def __len__(self):
|
||||
return len(self.addresslist)
|
||||
|
||||
def __add__(self, other):
|
||||
# Set union
|
||||
newaddr = AddressList(None)
|
||||
newaddr.addresslist = self.addresslist[:]
|
||||
for x in other.addresslist:
|
||||
if not x in self.addresslist:
|
||||
newaddr.addresslist.append(x)
|
||||
return newaddr
|
||||
|
||||
def __iadd__(self, other):
|
||||
# Set union, in-place
|
||||
for x in other.addresslist:
|
||||
if not x in self.addresslist:
|
||||
self.addresslist.append(x)
|
||||
return self
|
||||
|
||||
def __sub__(self, other):
|
||||
# Set difference
|
||||
newaddr = AddressList(None)
|
||||
for x in self.addresslist:
|
||||
if not x in other.addresslist:
|
||||
newaddr.addresslist.append(x)
|
||||
return newaddr
|
||||
|
||||
def __isub__(self, other):
|
||||
# Set difference, in-place
|
||||
for x in other.addresslist:
|
||||
if x in self.addresslist:
|
||||
self.addresslist.remove(x)
|
||||
return self
|
||||
|
||||
def __getitem__(self, index):
|
||||
# Make indexing, slices, and 'in' work
|
||||
return self.addresslist[index]
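
# Editor's illustrative sketch -- not part of the original module or of this
# commit.  The sample header value below is invented for demonstration.
def _demo_addresslist():
    """Parse a header value with AddressList and use its set-style operators."""
    al = AddressList('Alice Example <alice@example.com>, bob@example.com')
    assert len(al) == 2
    assert ('Alice Example', 'alice@example.com') in al.addresslist

    bl = AddressList('bob@example.com')
    assert len(al + bl) == 2    # union: bob is already present
    assert len(al - bl) == 1    # difference: only Alice remains
    return al.addresslist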
|
Binary file not shown.
Binary file not shown.
@@ -0,0 +1,183 @@
# Copyright (C) 2002-2006 Python Software Foundation
|
||||
# Author: Ben Gertzfield
|
||||
# Contact: email-sig@python.org
|
||||
|
||||
"""Base64 content transfer encoding per RFCs 2045-2047.
|
||||
|
||||
This module handles the content transfer encoding method defined in RFC 2045
|
||||
to encode arbitrary 8-bit data using the three 8-bit bytes in four 7-bit
|
||||
characters encoding known as Base64.
|
||||
|
||||
It is used in the MIME standards for email to attach images, audio, and text
|
||||
using some 8-bit character sets to messages.
|
||||
|
||||
This module provides an interface to encode and decode both headers and bodies
|
||||
with Base64 encoding.
|
||||
|
||||
RFC 2045 defines a method for including character set information in an
|
||||
`encoded-word' in a header. This method is commonly used for 8-bit real names
|
||||
in To:, From:, Cc:, etc. fields, as well as Subject: lines.
|
||||
|
||||
This module does not do the line wrapping or end-of-line character conversion
|
||||
necessary for proper internationalized headers; it only does dumb encoding and
|
||||
decoding. To deal with the various line wrapping issues, use the email.header
|
||||
module.
|
||||
"""
|
||||
|
||||
__all__ = [
|
||||
'base64_len',
|
||||
'body_decode',
|
||||
'body_encode',
|
||||
'decode',
|
||||
'decodestring',
|
||||
'encode',
|
||||
'encodestring',
|
||||
'header_encode',
|
||||
]
|
||||
|
||||
|
||||
from binascii import b2a_base64, a2b_base64
|
||||
from email.utils import fix_eols
|
||||
|
||||
CRLF = '\r\n'
|
||||
NL = '\n'
|
||||
EMPTYSTRING = ''
|
||||
|
||||
# See also Charset.py
|
||||
MISC_LEN = 7
|
||||
|
||||
|
||||
|
||||
# Helpers
|
||||
def base64_len(s):
|
||||
"""Return the length of s when it is encoded with base64."""
|
||||
groups_of_3, leftover = divmod(len(s), 3)
|
||||
# 4 bytes out for each 3 bytes (or nonzero fraction thereof) in.
|
||||
# Thanks, Tim!
|
||||
n = groups_of_3 * 4
|
||||
if leftover:
|
||||
n += 4
|
||||
return n
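
# Editor's illustrative sketch -- not part of the original module: base64
# expands every started group of 3 input bytes into 4 output characters.
def _demo_base64_len():
    assert base64_len('abc') == 4    # one full group of three bytes
    assert base64_len('abcd') == 8   # one full group plus one leftover byte
    assert base64_len('') == 0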
|
||||
|
||||
|
||||
|
||||
def header_encode(header, charset='iso-8859-1', keep_eols=False,
|
||||
maxlinelen=76, eol=NL):
|
||||
"""Encode a single header line with Base64 encoding in a given charset.
|
||||
|
||||
Defined in RFC 2045, this Base64 encoding is identical to normal Base64
|
||||
encoding, except that each line must be intelligently wrapped (respecting
|
||||
the Base64 encoding), and subsequent lines must start with a space.
|
||||
|
||||
charset names the character set to use to encode the header. It defaults
|
||||
to iso-8859-1.
|
||||
|
||||
End-of-line characters (\\r, \\n, \\r\\n) will be automatically converted
|
||||
to the canonical email line separator \\r\\n unless the keep_eols
|
||||
parameter is True (the default is False).
|
||||
|
||||
Each line of the header will be terminated in the value of eol, which
|
||||
defaults to "\\n". Set this to "\\r\\n" if you are using the result of
|
||||
this function directly in email.
|
||||
|
||||
The resulting string will be in the form:
|
||||
|
||||
"=?charset?b?WW/5ciBtYXp66XLrIHf8eiBhIGhhbXBzdGHuciBBIFlv+XIgbWF6euly?=\\n
|
||||
=?charset?b?6yB3/HogYSBoYW1wc3Rh7nIgQkMgWW/5ciBtYXp66XLrIHf8eiBhIGhh?="
|
||||
|
||||
with each line wrapped at, at most, maxlinelen characters (defaults to 76
|
||||
characters).
|
||||
"""
|
||||
# Return empty headers unchanged
|
||||
if not header:
|
||||
return header
|
||||
|
||||
if not keep_eols:
|
||||
header = fix_eols(header)
|
||||
|
||||
# Base64 encode each line, in encoded chunks no greater than maxlinelen in
|
||||
# length, after the RFC chrome is added in.
|
||||
base64ed = []
|
||||
max_encoded = maxlinelen - len(charset) - MISC_LEN
|
||||
max_unencoded = max_encoded * 3 // 4
|
||||
|
||||
for i in range(0, len(header), max_unencoded):
|
||||
base64ed.append(b2a_base64(header[i:i+max_unencoded]))
|
||||
|
||||
# Now add the RFC chrome to each encoded chunk
|
||||
lines = []
|
||||
for line in base64ed:
|
||||
# Ignore the last character of each line if it is a newline
|
||||
if line.endswith(NL):
|
||||
line = line[:-1]
|
||||
# Add the chrome
|
||||
lines.append('=?%s?b?%s?=' % (charset, line))
|
||||
# Glue the lines together and return it. BAW: should we be able to
|
||||
# specify the leading whitespace in the joiner?
|
||||
joiner = eol + ' '
|
||||
return joiner.join(lines)
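
# Editor's illustrative sketch -- not part of the original module.  The input
# bytes are invented; they spell 'Cafe menu' with an e-acute in iso-8859-1.
def _demo_header_encode():
    encoded = header_encode('Caf\xe9 menu', charset='iso-8859-1')
    # Produces an RFC 2047 encoded-word such as '=?iso-8859-1?b?Q2Fm6SBtZW51?='
    assert encoded.startswith('=?iso-8859-1?b?')
    return encoded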
|
||||
|
||||
|
||||
|
||||
def encode(s, binary=True, maxlinelen=76, eol=NL):
|
||||
"""Encode a string with base64.
|
||||
|
||||
Each line will be wrapped at, at most, maxlinelen characters (defaults to
|
||||
76 characters).
|
||||
|
||||
If binary is False, end-of-line characters will be converted to the
|
||||
canonical email end-of-line sequence \\r\\n. Otherwise they will be left
|
||||
verbatim (this is the default).
|
||||
|
||||
Each line of encoded text will end with eol, which defaults to "\\n". Set
|
||||
this to "\\r\\n" if you will be using the result of this function directly
|
||||
in an email.
|
||||
"""
|
||||
if not s:
|
||||
return s
|
||||
|
||||
if not binary:
|
||||
s = fix_eols(s)
|
||||
|
||||
encvec = []
|
||||
max_unencoded = maxlinelen * 3 // 4
|
||||
for i in range(0, len(s), max_unencoded):
|
||||
# BAW: should encode() inherit b2a_base64()'s dubious behavior in
|
||||
# adding a newline to the encoded string?
|
||||
enc = b2a_base64(s[i:i + max_unencoded])
|
||||
if enc.endswith(NL) and eol != NL:
|
||||
enc = enc[:-1] + eol
|
||||
encvec.append(enc)
|
||||
return EMPTYSTRING.join(encvec)
|
||||
|
||||
|
||||
# For convenience and backwards compatibility w/ standard base64 module
|
||||
body_encode = encode
|
||||
encodestring = encode
|
||||
|
||||
|
||||
|
||||
def decode(s, convert_eols=None):
|
||||
"""Decode a raw base64 string.
|
||||
|
||||
If convert_eols is set to a string value, all canonical email linefeeds,
|
||||
e.g. "\\r\\n", in the decoded text will be converted to the value of
|
||||
convert_eols. os.linesep is a good choice for convert_eols if you are
|
||||
decoding a text attachment.
|
||||
|
||||
This function does not parse a full MIME header value encoded with
|
||||
base64 (like =?iso-8895-1?b?bmloISBuaWgh?=) -- please use the high
|
||||
level email.header class for that functionality.
|
||||
"""
|
||||
if not s:
|
||||
return s
|
||||
|
||||
dec = a2b_base64(s)
|
||||
if convert_eols:
|
||||
return dec.replace(CRLF, convert_eols)
|
||||
return dec
|
||||
|
||||
|
||||
# For convenience and backwards compatibility w/ standard base64 module
|
||||
body_decode = decode
|
||||
decodestring = decode
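
# Editor's illustrative sketch -- not part of the original module: a body
# encoded with body_encode() decodes back to the original text.
def _demo_body_roundtrip():
    original = 'hello\nworld\n'
    encoded = body_encode(original, binary=False)    # canonicalizes \n to \r\n first
    decoded = body_decode(encoded, convert_eols='\n')
    assert decoded == original
    return encoded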
|
Binary file not shown.
Binary file not shown.
@@ -0,0 +1,397 @@
# Copyright (C) 2001-2006 Python Software Foundation
|
||||
# Author: Ben Gertzfield, Barry Warsaw
|
||||
# Contact: email-sig@python.org
|
||||
|
||||
__all__ = [
|
||||
'Charset',
|
||||
'add_alias',
|
||||
'add_charset',
|
||||
'add_codec',
|
||||
]
|
||||
|
||||
import codecs
|
||||
import email.base64mime
|
||||
import email.quoprimime
|
||||
|
||||
from email import errors
|
||||
from email.encoders import encode_7or8bit
|
||||
|
||||
|
||||
|
||||
# Flags for types of header encodings
|
||||
QP = 1 # Quoted-Printable
|
||||
BASE64 = 2 # Base64
|
||||
SHORTEST = 3 # the shorter of QP and base64, but only for headers
|
||||
|
||||
# In "=?charset?q?hello_world?=", the =?, ?q?, and ?= add up to 7
|
||||
MISC_LEN = 7
|
||||
|
||||
DEFAULT_CHARSET = 'us-ascii'
|
||||
|
||||
|
||||
|
||||
# Defaults
|
||||
CHARSETS = {
|
||||
# input header enc body enc output conv
|
||||
'iso-8859-1': (QP, QP, None),
|
||||
'iso-8859-2': (QP, QP, None),
|
||||
'iso-8859-3': (QP, QP, None),
|
||||
'iso-8859-4': (QP, QP, None),
|
||||
# iso-8859-5 is Cyrillic, and not especially used
|
||||
# iso-8859-6 is Arabic, also not particularly used
|
||||
# iso-8859-7 is Greek, QP will not make it readable
|
||||
# iso-8859-8 is Hebrew, QP will not make it readable
|
||||
'iso-8859-9': (QP, QP, None),
|
||||
'iso-8859-10': (QP, QP, None),
|
||||
# iso-8859-11 is Thai, QP will not make it readable
|
||||
'iso-8859-13': (QP, QP, None),
|
||||
'iso-8859-14': (QP, QP, None),
|
||||
'iso-8859-15': (QP, QP, None),
|
||||
'iso-8859-16': (QP, QP, None),
|
||||
'windows-1252':(QP, QP, None),
|
||||
'viscii': (QP, QP, None),
|
||||
'us-ascii': (None, None, None),
|
||||
'big5': (BASE64, BASE64, None),
|
||||
'gb2312': (BASE64, BASE64, None),
|
||||
'euc-jp': (BASE64, None, 'iso-2022-jp'),
|
||||
'shift_jis': (BASE64, None, 'iso-2022-jp'),
|
||||
'iso-2022-jp': (BASE64, None, None),
|
||||
'koi8-r': (BASE64, BASE64, None),
|
||||
'utf-8': (SHORTEST, BASE64, 'utf-8'),
|
||||
# We're making this one up to represent raw unencoded 8-bit
|
||||
'8bit': (None, BASE64, 'utf-8'),
|
||||
}
|
||||
|
||||
# Aliases for other commonly-used names for character sets. Map
|
||||
# them to the real ones used in email.
|
||||
ALIASES = {
|
||||
'latin_1': 'iso-8859-1',
|
||||
'latin-1': 'iso-8859-1',
|
||||
'latin_2': 'iso-8859-2',
|
||||
'latin-2': 'iso-8859-2',
|
||||
'latin_3': 'iso-8859-3',
|
||||
'latin-3': 'iso-8859-3',
|
||||
'latin_4': 'iso-8859-4',
|
||||
'latin-4': 'iso-8859-4',
|
||||
'latin_5': 'iso-8859-9',
|
||||
'latin-5': 'iso-8859-9',
|
||||
'latin_6': 'iso-8859-10',
|
||||
'latin-6': 'iso-8859-10',
|
||||
'latin_7': 'iso-8859-13',
|
||||
'latin-7': 'iso-8859-13',
|
||||
'latin_8': 'iso-8859-14',
|
||||
'latin-8': 'iso-8859-14',
|
||||
'latin_9': 'iso-8859-15',
|
||||
'latin-9': 'iso-8859-15',
|
||||
'latin_10':'iso-8859-16',
|
||||
'latin-10':'iso-8859-16',
|
||||
'cp949': 'ks_c_5601-1987',
|
||||
'euc_jp': 'euc-jp',
|
||||
'euc_kr': 'euc-kr',
|
||||
'ascii': 'us-ascii',
|
||||
}
|
||||
|
||||
|
||||
# Map charsets to their Unicode codec strings.
|
||||
CODEC_MAP = {
|
||||
'gb2312': 'eucgb2312_cn',
|
||||
'big5': 'big5_tw',
|
||||
# Hack: We don't want *any* conversion for stuff marked us-ascii, as all
|
||||
# sorts of garbage might be sent to us in the guise of 7-bit us-ascii.
|
||||
# Let that stuff pass through without conversion to/from Unicode.
|
||||
'us-ascii': None,
|
||||
}
|
||||
|
||||
|
||||
|
||||
# Convenience functions for extending the above mappings
|
||||
def add_charset(charset, header_enc=None, body_enc=None, output_charset=None):
|
||||
"""Add character set properties to the global registry.
|
||||
|
||||
charset is the input character set, and must be the canonical name of a
|
||||
character set.
|
||||
|
||||
Optional header_enc and body_enc are either Charset.QP for
|
||||
quoted-printable, Charset.BASE64 for base64 encoding, Charset.SHORTEST for
|
||||
the shortest of qp or base64 encoding, or None for no encoding. SHORTEST
|
||||
is only valid for header_enc. It describes how message headers and
|
||||
message bodies in the input charset are to be encoded. Default is no
|
||||
encoding.
|
||||
|
||||
Optional output_charset is the character set that the output should be
|
||||
in. Conversions will proceed from input charset, to Unicode, to the
|
||||
output charset when the method Charset.convert() is called. The default
|
||||
is to output in the same character set as the input.
|
||||
|
||||
Both input_charset and output_charset must have Unicode codec entries in
|
||||
the module's charset-to-codec mapping; use add_codec(charset, codecname)
|
||||
to add codecs the module does not know about. See the codecs module's
|
||||
documentation for more information.
|
||||
"""
|
||||
if body_enc == SHORTEST:
|
||||
raise ValueError('SHORTEST not allowed for body_enc')
|
||||
CHARSETS[charset] = (header_enc, body_enc, output_charset)
|
||||
|
||||
|
||||
def add_alias(alias, canonical):
|
||||
"""Add a character set alias.
|
||||
|
||||
alias is the alias name, e.g. latin-1
|
||||
canonical is the character set's canonical name, e.g. iso-8859-1
|
||||
"""
|
||||
ALIASES[alias] = canonical
|
||||
|
||||
|
||||
def add_codec(charset, codecname):
|
||||
"""Add a codec that map characters in the given charset to/from Unicode.
|
||||
|
||||
charset is the canonical name of a character set. codecname is the name
|
||||
of a Python codec, as appropriate for the second argument to the unicode()
|
||||
built-in, or to the encode() method of a Unicode string.
|
||||
"""
|
||||
CODEC_MAP[charset] = codecname
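
# Editor's illustrative sketch -- not part of the original module: registering
# a character set the tables above do not list.  'mac-roman'/'mac_roman' are
# real codec names, but this particular registration is only an example.
def _demo_register_mac_roman():
    add_charset('mac-roman', QP, BASE64, 'utf-8')
    add_alias('macintosh', 'mac-roman')
    add_codec('mac-roman', 'mac_roman')    # the Python codec name uses an underscore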
|
||||
|
||||
|
||||
|
||||
class Charset:
|
||||
"""Map character sets to their email properties.
|
||||
|
||||
This class provides information about the requirements imposed on email
|
||||
for a specific character set. It also provides convenience routines for
|
||||
converting between character sets, given the availability of the
|
||||
applicable codecs. Given a character set, it will do its best to provide
|
||||
information on how to use that character set in an email in an
|
||||
RFC-compliant way.
|
||||
|
||||
Certain character sets must be encoded with quoted-printable or base64
|
||||
when used in email headers or bodies. Certain character sets must be
|
||||
converted outright, and are not allowed in email. Instances of this
|
||||
module expose the following information about a character set:
|
||||
|
||||
input_charset: The initial character set specified. Common aliases
|
||||
are converted to their `official' email names (e.g. latin_1
|
||||
is converted to iso-8859-1). Defaults to 7-bit us-ascii.
|
||||
|
||||
header_encoding: If the character set must be encoded before it can be
|
||||
used in an email header, this attribute will be set to
|
||||
Charset.QP (for quoted-printable), Charset.BASE64 (for
|
||||
base64 encoding), or Charset.SHORTEST for the shortest of
|
||||
QP or BASE64 encoding. Otherwise, it will be None.
|
||||
|
||||
body_encoding: Same as header_encoding, but describes the encoding for the
|
||||
mail message's body, which indeed may be different than the
|
||||
header encoding. Charset.SHORTEST is not allowed for
|
||||
body_encoding.
|
||||
|
||||
output_charset: Some character sets must be converted before they can be
|
||||
used in email headers or bodies. If the input_charset is
|
||||
one of them, this attribute will contain the name of the
|
||||
charset output will be converted to. Otherwise, it will
|
||||
be None.
|
||||
|
||||
input_codec: The name of the Python codec used to convert the
|
||||
input_charset to Unicode. If no conversion codec is
|
||||
necessary, this attribute will be None.
|
||||
|
||||
output_codec: The name of the Python codec used to convert Unicode
|
||||
to the output_charset. If no conversion codec is necessary,
|
||||
this attribute will have the same value as the input_codec.
|
||||
"""
|
||||
def __init__(self, input_charset=DEFAULT_CHARSET):
|
||||
# RFC 2046, $4.1.2 says charsets are not case sensitive. We coerce to
|
||||
# unicode because its .lower() is locale insensitive. If the argument
|
||||
# is already a unicode, we leave it at that, but ensure that the
|
||||
# charset is ASCII, as the standard (RFC XXX) requires.
|
||||
try:
|
||||
if isinstance(input_charset, unicode):
|
||||
input_charset.encode('ascii')
|
||||
else:
|
||||
input_charset = unicode(input_charset, 'ascii')
|
||||
except UnicodeError:
|
||||
raise errors.CharsetError(input_charset)
|
||||
input_charset = input_charset.lower().encode('ascii')
|
||||
# Set the input charset after filtering through the aliases and/or codecs
|
||||
if not (input_charset in ALIASES or input_charset in CHARSETS):
|
||||
try:
|
||||
input_charset = codecs.lookup(input_charset).name
|
||||
except LookupError:
|
||||
pass
|
||||
self.input_charset = ALIASES.get(input_charset, input_charset)
|
||||
# We can try to guess which encoding and conversion to use by the
|
||||
# charset_map dictionary. Try that first, but let the user override
|
||||
# it.
|
||||
henc, benc, conv = CHARSETS.get(self.input_charset,
|
||||
(SHORTEST, BASE64, None))
|
||||
if not conv:
|
||||
conv = self.input_charset
|
||||
# Set the attributes, allowing the arguments to override the default.
|
||||
self.header_encoding = henc
|
||||
self.body_encoding = benc
|
||||
self.output_charset = ALIASES.get(conv, conv)
|
||||
# Now set the codecs. If one isn't defined for input_charset,
|
||||
# guess and try a Unicode codec with the same name as input_codec.
|
||||
self.input_codec = CODEC_MAP.get(self.input_charset,
|
||||
self.input_charset)
|
||||
self.output_codec = CODEC_MAP.get(self.output_charset,
|
||||
self.output_charset)
|
||||
|
||||
def __str__(self):
|
||||
return self.input_charset.lower()
|
||||
|
||||
__repr__ = __str__
|
||||
|
||||
def __eq__(self, other):
|
||||
return str(self) == str(other).lower()
|
||||
|
||||
def __ne__(self, other):
|
||||
return not self.__eq__(other)
|
||||
|
||||
def get_body_encoding(self):
|
||||
"""Return the content-transfer-encoding used for body encoding.
|
||||
|
||||
This is either the string `quoted-printable' or `base64' depending on
|
||||
the encoding used, or it is a function in which case you should call
|
||||
the function with a single argument, the Message object being
|
||||
encoded. The function should then set the Content-Transfer-Encoding
|
||||
header itself to whatever is appropriate.
|
||||
|
||||
Returns "quoted-printable" if self.body_encoding is QP.
|
||||
Returns "base64" if self.body_encoding is BASE64.
|
||||
Returns "7bit" otherwise.
|
||||
"""
|
||||
assert self.body_encoding != SHORTEST
|
||||
if self.body_encoding == QP:
|
||||
return 'quoted-printable'
|
||||
elif self.body_encoding == BASE64:
|
||||
return 'base64'
|
||||
else:
|
||||
return encode_7or8bit
|
||||
|
||||
def convert(self, s):
|
||||
"""Convert a string from the input_codec to the output_codec."""
|
||||
if self.input_codec != self.output_codec:
|
||||
return unicode(s, self.input_codec).encode(self.output_codec)
|
||||
else:
|
||||
return s
|
||||
|
||||
def to_splittable(self, s):
|
||||
"""Convert a possibly multibyte string to a safely splittable format.
|
||||
|
||||
Uses the input_codec to try and convert the string to Unicode, so it
|
||||
can be safely split on character boundaries (even for multibyte
|
||||
characters).
|
||||
|
||||
Returns the string as-is if it isn't known how to convert it to
|
||||
Unicode with the input_charset.
|
||||
|
||||
Characters that could not be converted to Unicode will be replaced
|
||||
with the Unicode replacement character U+FFFD.
|
||||
"""
|
||||
if isinstance(s, unicode) or self.input_codec is None:
|
||||
return s
|
||||
try:
|
||||
return unicode(s, self.input_codec, 'replace')
|
||||
except LookupError:
|
||||
# Input codec not installed on system, so return the original
|
||||
# string unchanged.
|
||||
return s
|
||||
|
||||
def from_splittable(self, ustr, to_output=True):
|
||||
"""Convert a splittable string back into an encoded string.
|
||||
|
||||
Uses the proper codec to try and convert the string from Unicode back
|
||||
into an encoded format. Return the string as-is if it is not Unicode,
|
||||
or if it could not be converted from Unicode.
|
||||
|
||||
Characters that could not be converted from Unicode will be replaced
|
||||
with an appropriate character (usually '?').
|
||||
|
||||
If to_output is True (the default), uses output_codec to convert to an
|
||||
encoded format. If to_output is False, uses input_codec.
|
||||
"""
|
||||
if to_output:
|
||||
codec = self.output_codec
|
||||
else:
|
||||
codec = self.input_codec
|
||||
if not isinstance(ustr, unicode) or codec is None:
|
||||
return ustr
|
||||
try:
|
||||
return ustr.encode(codec, 'replace')
|
||||
except LookupError:
|
||||
# Output codec not installed
|
||||
return ustr
|
||||
|
||||
def get_output_charset(self):
|
||||
"""Return the output character set.
|
||||
|
||||
This is self.output_charset if that is not None, otherwise it is
|
||||
self.input_charset.
|
||||
"""
|
||||
return self.output_charset or self.input_charset
|
||||
|
||||
def encoded_header_len(self, s):
|
||||
"""Return the length of the encoded header string."""
|
||||
cset = self.get_output_charset()
|
||||
# The len(s) of a 7bit encoding is len(s)
|
||||
if self.header_encoding == BASE64:
|
||||
return email.base64mime.base64_len(s) + len(cset) + MISC_LEN
|
||||
elif self.header_encoding == QP:
|
||||
return email.quoprimime.header_quopri_len(s) + len(cset) + MISC_LEN
|
||||
elif self.header_encoding == SHORTEST:
|
||||
lenb64 = email.base64mime.base64_len(s)
|
||||
lenqp = email.quoprimime.header_quopri_len(s)
|
||||
return min(lenb64, lenqp) + len(cset) + MISC_LEN
|
||||
else:
|
||||
return len(s)
|
||||
|
||||
def header_encode(self, s, convert=False):
|
||||
"""Header-encode a string, optionally converting it to output_charset.
|
||||
|
||||
If convert is True, the string will be converted from the input
|
||||
charset to the output charset automatically. This is not useful for
|
||||
multibyte character sets, which have line length issues (multibyte
|
||||
characters must be split on a character, not a byte boundary); use the
|
||||
high-level Header class to deal with these issues. convert defaults
|
||||
to False.
|
||||
|
||||
The type of encoding (base64 or quoted-printable) will be based on
|
||||
self.header_encoding.
|
||||
"""
|
||||
cset = self.get_output_charset()
|
||||
if convert:
|
||||
s = self.convert(s)
|
||||
# 7bit/8bit encodings return the string unchanged (modulo conversions)
|
||||
if self.header_encoding == BASE64:
|
||||
return email.base64mime.header_encode(s, cset)
|
||||
elif self.header_encoding == QP:
|
||||
return email.quoprimime.header_encode(s, cset, maxlinelen=None)
|
||||
elif self.header_encoding == SHORTEST:
|
||||
lenb64 = email.base64mime.base64_len(s)
|
||||
lenqp = email.quoprimime.header_quopri_len(s)
|
||||
if lenb64 < lenqp:
|
||||
return email.base64mime.header_encode(s, cset)
|
||||
else:
|
||||
return email.quoprimime.header_encode(s, cset, maxlinelen=None)
|
||||
else:
|
||||
return s
|
||||
|
||||
def body_encode(self, s, convert=True):
|
||||
"""Body-encode a string and convert it to output_charset.
|
||||
|
||||
If convert is True (the default), the string will be converted from
|
||||
the input charset to output charset automatically. Unlike
|
||||
header_encode(), there are no issues with byte boundaries and
|
||||
multibyte charsets in email bodies, so this is usually pretty safe.
|
||||
|
||||
The type of encoding (base64 or quoted-printable) will be based on
|
||||
self.body_encoding.
|
||||
"""
|
||||
if convert:
|
||||
s = self.convert(s)
|
||||
# 7bit/8bit encodings return the string unchanged (modulo conversions)
|
||||
if self.body_encoding is BASE64:
|
||||
return email.base64mime.body_encode(s)
|
||||
elif self.body_encoding is QP:
|
||||
return email.quoprimime.body_encode(s)
|
||||
else:
|
||||
return s
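
# Editor's illustrative sketch -- not part of the original module.  The sample
# strings are invented; 'caf\xc3\xa9' is 'cafe' with an e-acute as raw UTF-8 bytes.
def _demo_charset():
    latin = Charset('latin_1')    # alias resolves to iso-8859-1
    assert str(latin) == 'iso-8859-1'
    assert latin.get_body_encoding() == 'quoted-printable'

    utf8 = Charset('utf-8')
    assert utf8.get_body_encoding() == 'base64'
    subject = utf8.header_encode('caf\xc3\xa9')    # RFC 2047 encoded-word
    body = utf8.body_encode('caf\xc3\xa9')         # base64 body text
    return subject, body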
|
Binary file not shown.
Binary file not shown.
@@ -0,0 +1,82 @@
# Copyright (C) 2001-2006 Python Software Foundation
# Author: Barry Warsaw
# Contact: email-sig@python.org

"""Encodings and related functions."""

__all__ = [
    'encode_7or8bit',
    'encode_base64',
    'encode_noop',
    'encode_quopri',
    ]

import base64

from quopri import encodestring as _encodestring



def _qencode(s):
    enc = _encodestring(s, quotetabs=True)
    # Must encode spaces, which quopri.encodestring() doesn't do
    return enc.replace(' ', '=20')


def _bencode(s):
    # We can't quite use base64.encodestring() since it tacks on a "courtesy
    # newline". Blech!
    if not s:
        return s
    hasnewline = (s[-1] == '\n')
    value = base64.encodestring(s)
    if not hasnewline and value[-1] == '\n':
        return value[:-1]
    return value



def encode_base64(msg):
    """Encode the message's payload in Base64.

    Also, add an appropriate Content-Transfer-Encoding header.
    """
    orig = msg.get_payload()
    encdata = _bencode(orig)
    msg.set_payload(encdata)
    msg['Content-Transfer-Encoding'] = 'base64'



def encode_quopri(msg):
    """Encode the message's payload in quoted-printable.

    Also, add an appropriate Content-Transfer-Encoding header.
    """
    orig = msg.get_payload()
    encdata = _qencode(orig)
    msg.set_payload(encdata)
    msg['Content-Transfer-Encoding'] = 'quoted-printable'



def encode_7or8bit(msg):
    """Set the Content-Transfer-Encoding header to 7bit or 8bit."""
    orig = msg.get_payload()
    if orig is None:
        # There's no payload. For backwards compatibility we use 7bit
        msg['Content-Transfer-Encoding'] = '7bit'
        return
    # We play a trick to make this go fast. If encoding to ASCII succeeds, we
    # know the data must be 7bit, otherwise treat it as 8bit.
    try:
        orig.encode('ascii')
    except UnicodeError:
        msg['Content-Transfer-Encoding'] = '8bit'
    else:
        msg['Content-Transfer-Encoding'] = '7bit'



def encode_noop(msg):
    """Do nothing."""
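
# Editor's illustrative sketch -- not part of the original module.  The payloads
# below are invented; email.message is the standard location of Message here.
def _demo_encoders():
    from email.message import Message
    msg = Message()
    msg.set_payload('caf\xc3\xa9 au lait')    # raw UTF-8 bytes
    encode_base64(msg)
    assert msg['Content-Transfer-Encoding'] == 'base64'

    plain = Message()
    plain.set_payload('plain ascii text')
    encode_7or8bit(plain)                     # ASCII payload, so 7bit is chosen
    assert plain['Content-Transfer-Encoding'] == '7bit'
    return msg, plain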
Binary file not shown.
Binary file not shown.
@@ -0,0 +1,57 @@
# Copyright (C) 2001-2006 Python Software Foundation
# Author: Barry Warsaw
# Contact: email-sig@python.org

"""email package exception classes."""



class MessageError(Exception):
    """Base class for errors in the email package."""


class MessageParseError(MessageError):
    """Base class for message parsing errors."""


class HeaderParseError(MessageParseError):
    """Error while parsing headers."""


class BoundaryError(MessageParseError):
    """Couldn't find terminating boundary."""


class MultipartConversionError(MessageError, TypeError):
    """Conversion to a multipart is prohibited."""


class CharsetError(MessageError):
    """An illegal charset was given."""



# These are parsing defects which the parser was able to work around.
class MessageDefect:
    """Base class for a message defect."""

    def __init__(self, line=None):
        self.line = line

class NoBoundaryInMultipartDefect(MessageDefect):
    """A message claimed to be a multipart but had no boundary parameter."""

class StartBoundaryNotFoundDefect(MessageDefect):
    """The claimed start boundary was never found."""

class FirstHeaderLineIsContinuationDefect(MessageDefect):
    """A message had a continuation line as its first header line."""

class MisplacedEnvelopeHeaderDefect(MessageDefect):
    """A 'Unix-from' header was found in the middle of a header block."""

class MalformedHeaderDefect(MessageDefect):
    """Found a header that was missing a colon, or was otherwise malformed."""

class MultipartInvariantViolationDefect(MessageDefect):
    """A message claimed to be a multipart but no subparts were found."""
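
# Editor's illustrative sketch -- not part of the original module.  Defects are
# recorded on a message rather than raised; the feed parser (email.feedparser
# in this package) appends them to msg.defects.  The sample message is invented.
def _demo_defects():
    from email.feedparser import FeedParser
    parser = FeedParser()
    parser.feed('Content-Type: multipart/mixed\r\n\r\nbody\r\n')    # no boundary given
    msg = parser.close()
    assert any(isinstance(d, NoBoundaryInMultipartDefect) for d in msg.defects)
    return msg.defects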
Binary file not shown.
Binary file not shown.
@@ -0,0 +1,484 @@
# Copyright (C) 2004-2006 Python Software Foundation
|
||||
# Authors: Baxter, Wouters and Warsaw
|
||||
# Contact: email-sig@python.org
|
||||
|
||||
"""FeedParser - An email feed parser.
|
||||
|
||||
The feed parser implements an interface for incrementally parsing an email
|
||||
message, line by line. This has advantages for certain applications, such as
|
||||
those reading email messages off a socket.
|
||||
|
||||
FeedParser.feed() is the primary interface for pushing new data into the
|
||||
parser. It returns when there's nothing more it can do with the available
|
||||
data. When you have no more data to push into the parser, call .close().
|
||||
This completes the parsing and returns the root message object.
|
||||
|
||||
The other advantage of this parser is that it will never raise a parsing
|
||||
exception. Instead, when it finds something unexpected, it adds a 'defect' to
|
||||
the current message. Defects are just instances that live on the message
|
||||
object's .defects attribute.
|
||||
"""
|
||||
|
||||
__all__ = ['FeedParser']
|
||||
|
||||
import re
|
||||
|
||||
from email import errors
|
||||
from email import message
|
||||
|
||||
NLCRE = re.compile('\r\n|\r|\n')
|
||||
NLCRE_bol = re.compile('(\r\n|\r|\n)')
|
||||
NLCRE_eol = re.compile('(\r\n|\r|\n)\Z')
|
||||
NLCRE_crack = re.compile('(\r\n|\r|\n)')
|
||||
# RFC 2822 $3.6.8 Optional fields. ftext is %d33-57 / %d59-126, Any character
|
||||
# except controls, SP, and ":".
|
||||
headerRE = re.compile(r'^(From |[\041-\071\073-\176]{1,}:|[\t ])')
|
||||
EMPTYSTRING = ''
|
||||
NL = '\n'
|
||||
|
||||
NeedMoreData = object()
|
||||
|
||||
|
||||
|
||||
class BufferedSubFile(object):
|
||||
"""A file-ish object that can have new data loaded into it.
|
||||
|
||||
You can also push and pop line-matching predicates onto a stack. When the
|
||||
current predicate matches the current line, a false EOF response
|
||||
(i.e. empty string) is returned instead. This lets the parser adhere to a
|
||||
simple abstraction -- it parses until EOF closes the current message.
|
||||
"""
|
||||
def __init__(self):
|
||||
# The last partial line pushed into this object.
|
||||
self._partial = ''
|
||||
# The list of full, pushed lines, in reverse order
|
||||
self._lines = []
|
||||
# The stack of false-EOF checking predicates.
|
||||
self._eofstack = []
|
||||
# A flag indicating whether the file has been closed or not.
|
||||
self._closed = False
|
||||
|
||||
def push_eof_matcher(self, pred):
|
||||
self._eofstack.append(pred)
|
||||
|
||||
def pop_eof_matcher(self):
|
||||
return self._eofstack.pop()
|
||||
|
||||
def close(self):
|
||||
# Don't forget any trailing partial line.
|
||||
self._lines.append(self._partial)
|
||||
self._partial = ''
|
||||
self._closed = True
|
||||
|
||||
def readline(self):
|
||||
if not self._lines:
|
||||
if self._closed:
|
||||
return ''
|
||||
return NeedMoreData
|
||||
# Pop the line off the stack and see if it matches the current
|
||||
# false-EOF predicate.
|
||||
line = self._lines.pop()
|
||||
# RFC 2046, section 5.1.2 requires us to recognize outer level
|
||||
# boundaries at any level of inner nesting. Do this, but be sure it's
|
||||
# in the order of most to least nested.
|
||||
for ateof in self._eofstack[::-1]:
|
||||
if ateof(line):
|
||||
# We're at the false EOF. But push the last line back first.
|
||||
self._lines.append(line)
|
||||
return ''
|
||||
return line
|
||||
|
||||
def unreadline(self, line):
|
||||
# Let the consumer push a line back into the buffer.
|
||||
assert line is not NeedMoreData
|
||||
self._lines.append(line)
|
||||
|
||||
def push(self, data):
|
||||
"""Push some new data into this object."""
|
||||
# Handle any previous leftovers
|
||||
data, self._partial = self._partial + data, ''
|
||||
# Crack into lines, but preserve the newlines on the end of each
|
||||
parts = NLCRE_crack.split(data)
|
||||
# The *ahem* interesting behaviour of re.split when supplied grouping
|
||||
# parentheses is that the last element of the resulting list is the
|
||||
# data after the final RE. In the case of a NL/CR terminated string,
|
||||
# this is the empty string.
|
||||
self._partial = parts.pop()
|
||||
#GAN 29Mar09 bugs 1555570, 1721862 Confusion at 8K boundary ending with \r:
|
||||
# is there a \n to follow later?
|
||||
if not self._partial and parts and parts[-1].endswith('\r'):
|
||||
self._partial = parts.pop(-2)+parts.pop()
|
||||
# parts is a list of strings, alternating between the line contents
|
||||
# and the eol character(s). Gather up a list of lines after
|
||||
# re-attaching the newlines.
|
||||
lines = []
|
||||
for i in range(len(parts) // 2):
|
||||
lines.append(parts[i*2] + parts[i*2+1])
|
||||
self.pushlines(lines)
|
||||
|
||||
def pushlines(self, lines):
|
||||
# Reverse and insert at the front of the lines.
|
||||
self._lines[:0] = lines[::-1]
|
||||
|
||||
def is_closed(self):
|
||||
return self._closed
|
||||
|
||||
def __iter__(self):
|
||||
return self
|
||||
|
||||
def next(self):
|
||||
line = self.readline()
|
||||
if line == '':
|
||||
raise StopIteration
|
||||
return line
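
# Editor's illustrative sketch -- not part of the original module: push partial
# data into a BufferedSubFile and it hands back only complete lines, signalling
# NeedMoreData until the rest of a line arrives.
def _demo_buffered_subfile():
    bsf = BufferedSubFile()
    bsf.push('First line\r\nSecond li')
    assert bsf.readline() == 'First line\r\n'
    assert bsf.readline() is NeedMoreData    # 'Second li' is still a partial line
    bsf.push('ne\n')
    assert bsf.readline() == 'Second line\n'
    bsf.close()
    assert bsf.readline() == ''              # real EOF once close() has been called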
|
||||
|
||||
|
||||
|
||||
class FeedParser:
|
||||
"""A feed-style parser of email."""
|
||||
|
||||
def __init__(self, _factory=message.Message):
|
||||
"""_factory is called with no arguments to create a new message obj"""
|
||||
self._factory = _factory
|
||||
self._input = BufferedSubFile()
|
||||
self._msgstack = []
|
||||
self._parse = self._parsegen().next
|
||||
self._cur = None
|
||||
self._last = None
|
||||
self._headersonly = False
|
||||
|
||||
# Non-public interface for supporting Parser's headersonly flag
|
||||
def _set_headersonly(self):
|
||||
self._headersonly = True
|
||||
|
||||
def feed(self, data):
|
||||
"""Push more data into the parser."""
|
||||
self._input.push(data)
|
||||
self._call_parse()
|
||||
|
||||
def _call_parse(self):
|
||||
try:
|
||||
self._parse()
|
||||
except StopIteration:
|
||||
pass
|
||||
|
||||
def close(self):
|
||||
"""Parse all remaining data and return the root message object."""
|
||||
self._input.close()
|
||||
self._call_parse()
|
||||
root = self._pop_message()
|
||||
assert not self._msgstack
|
||||
# Look for final set of defects
|
||||
if root.get_content_maintype() == 'multipart' \
|
||||
and not root.is_multipart():
|
||||
root.defects.append(errors.MultipartInvariantViolationDefect())
|
||||
return root
|
||||
|
||||
def _new_message(self):
|
||||
msg = self._factory()
|
||||
if self._cur and self._cur.get_content_type() == 'multipart/digest':
|
||||
msg.set_default_type('message/rfc822')
|
||||
if self._msgstack:
|
||||
self._msgstack[-1].attach(msg)
|
||||
self._msgstack.append(msg)
|
||||
self._cur = msg
|
||||
self._last = msg
|
||||
|
||||
def _pop_message(self):
|
||||
retval = self._msgstack.pop()
|
||||
if self._msgstack:
|
||||
self._cur = self._msgstack[-1]
|
||||
else:
|
||||
self._cur = None
|
||||
return retval
|
||||
|
||||
def _parsegen(self):
|
||||
# Create a new message and start by parsing headers.
|
||||
self._new_message()
|
||||
headers = []
|
||||
# Collect the headers, searching for a line that doesn't match the RFC
|
||||
# 2822 header or continuation pattern (including an empty line).
|
||||
for line in self._input:
|
||||
if line is NeedMoreData:
|
||||
yield NeedMoreData
|
||||
continue
|
||||
if not headerRE.match(line):
|
||||
# If we saw the RFC defined header/body separator
|
||||
# (i.e. newline), just throw it away. Otherwise the line is
|
||||
# part of the body so push it back.
|
||||
if not NLCRE.match(line):
|
||||
self._input.unreadline(line)
|
||||
break
|
||||
headers.append(line)
|
||||
# Done with the headers, so parse them and figure out what we're
|
||||
# supposed to see in the body of the message.
|
||||
self._parse_headers(headers)
|
||||
# Headers-only parsing is a backwards compatibility hack, which was
|
||||
# necessary in the older parser, which could raise errors. All
|
||||
# remaining lines in the input are thrown into the message body.
|
||||
if self._headersonly:
|
||||
lines = []
|
||||
while True:
|
||||
line = self._input.readline()
|
||||
if line is NeedMoreData:
|
||||
yield NeedMoreData
|
||||
continue
|
||||
if line == '':
|
||||
break
|
||||
lines.append(line)
|
||||
self._cur.set_payload(EMPTYSTRING.join(lines))
|
||||
return
|
||||
if self._cur.get_content_type() == 'message/delivery-status':
|
||||
# message/delivery-status contains blocks of headers separated by
|
||||
# a blank line. We'll represent each header block as a separate
|
||||
# nested message object, but the processing is a bit different
|
||||
# than standard message/* types because there is no body for the
|
||||
# nested messages. A blank line separates the subparts.
|
||||
while True:
|
||||
self._input.push_eof_matcher(NLCRE.match)
|
||||
for retval in self._parsegen():
|
||||
if retval is NeedMoreData:
|
||||
yield NeedMoreData
|
||||
continue
|
||||
break
|
||||
msg = self._pop_message()
|
||||
# We need to pop the EOF matcher in order to tell if we're at
|
||||
# the end of the current file, not the end of the last block
|
||||
# of message headers.
|
||||
self._input.pop_eof_matcher()
|
||||
# The input stream must be sitting at the newline or at the
|
||||
# EOF. We want to see if we're at the end of this subpart, so
|
||||
# first consume the blank line, then test the next line to see
|
||||
# if we're at this subpart's EOF.
|
||||
while True:
|
||||
line = self._input.readline()
|
||||
if line is NeedMoreData:
|
||||
yield NeedMoreData
|
||||
continue
|
||||
break
|
||||
while True:
|
||||
line = self._input.readline()
|
||||
if line is NeedMoreData:
|
||||
yield NeedMoreData
|
||||
continue
|
||||
break
|
||||
if line == '':
|
||||
break
|
||||
# Not at EOF so this is a line we're going to need.
|
||||
self._input.unreadline(line)
|
||||
return
|
||||
if self._cur.get_content_maintype() == 'message':
|
||||
# If the message claims to be a message/* type, then what follows is
|
||||
# another RFC 2822 message.
|
||||
for retval in self._parsegen():
|
||||
if retval is NeedMoreData:
|
||||
yield NeedMoreData
|
||||
continue
|
||||
break
|
||||
self._pop_message()
|
||||
return
|
||||
if self._cur.get_content_maintype() == 'multipart':
|
||||
boundary = self._cur.get_boundary()
|
||||
if boundary is None:
|
||||
# The message /claims/ to be a multipart but it has not
|
||||
# defined a boundary. That's a problem which we'll handle by
|
||||
# reading everything until the EOF and marking the message as
|
||||
# defective.
|
||||
self._cur.defects.append(errors.NoBoundaryInMultipartDefect())
|
||||
lines = []
|
||||
for line in self._input:
|
||||
if line is NeedMoreData:
|
||||
yield NeedMoreData
|
||||
continue
|
||||
lines.append(line)
|
||||
self._cur.set_payload(EMPTYSTRING.join(lines))
|
||||
return
|
||||
# Create a line match predicate which matches the inter-part
|
||||
# boundary as well as the end-of-multipart boundary. Don't push
|
||||
# this onto the input stream until we've scanned past the
|
||||
# preamble.
|
||||
separator = '--' + boundary
|
||||
boundaryre = re.compile(
|
||||
'(?P<sep>' + re.escape(separator) +
|
||||
r')(?P<end>--)?(?P<ws>[ \t]*)(?P<linesep>\r\n|\r|\n)?$')
|
||||
capturing_preamble = True
|
||||
preamble = []
|
||||
linesep = False
|
||||
while True:
|
||||
line = self._input.readline()
|
||||
if line is NeedMoreData:
|
||||
yield NeedMoreData
|
||||
continue
|
||||
if line == '':
|
||||
break
|
||||
mo = boundaryre.match(line)
|
||||
if mo:
|
||||
# If we're looking at the end boundary, we're done with
|
||||
# this multipart. If there was a newline at the end of
|
||||
# the closing boundary, then we need to initialize the
|
||||
# epilogue with the empty string (see below).
|
||||
if mo.group('end'):
|
||||
linesep = mo.group('linesep')
|
||||
break
|
||||
# We saw an inter-part boundary. Were we in the preamble?
|
||||
if capturing_preamble:
|
||||
if preamble:
|
||||
# According to RFC 2046, the last newline belongs
|
||||
# to the boundary.
|
||||
lastline = preamble[-1]
|
||||
eolmo = NLCRE_eol.search(lastline)
|
||||
if eolmo:
|
||||
preamble[-1] = lastline[:-len(eolmo.group(0))]
|
||||
self._cur.preamble = EMPTYSTRING.join(preamble)
|
||||
capturing_preamble = False
|
||||
self._input.unreadline(line)
|
||||
continue
|
||||
# We saw a boundary separating two parts. Consume any
|
||||
# multiple boundary lines that may be following. Our
|
||||
# interpretation of RFC 2046 BNF grammar does not produce
|
||||
# body parts within such double boundaries.
|
||||
while True:
|
||||
line = self._input.readline()
|
||||
if line is NeedMoreData:
|
||||
yield NeedMoreData
|
||||
continue
|
||||
mo = boundaryre.match(line)
|
||||
if not mo:
|
||||
self._input.unreadline(line)
|
||||
break
|
||||
# Recurse to parse this subpart; the input stream points
|
||||
# at the subpart's first line.
|
||||
self._input.push_eof_matcher(boundaryre.match)
|
||||
for retval in self._parsegen():
|
||||
if retval is NeedMoreData:
|
||||
yield NeedMoreData
|
||||
continue
|
||||
break
|
||||
# Because of RFC 2046, the newline preceding the boundary
|
||||
# separator actually belongs to the boundary, not the
|
||||
# previous subpart's payload (or epilogue if the previous
|
||||
# part is a multipart).
|
||||
if self._last.get_content_maintype() == 'multipart':
|
||||
epilogue = self._last.epilogue
|
||||
if epilogue == '':
|
||||
self._last.epilogue = None
|
||||
elif epilogue is not None:
|
||||
mo = NLCRE_eol.search(epilogue)
|
||||
if mo:
|
||||
end = len(mo.group(0))
|
||||
self._last.epilogue = epilogue[:-end]
|
||||
else:
|
||||
payload = self._last.get_payload()
|
||||
if isinstance(payload, basestring):
|
||||
mo = NLCRE_eol.search(payload)
|
||||
if mo:
|
||||
payload = payload[:-len(mo.group(0))]
|
||||
self._last.set_payload(payload)
|
||||
self._input.pop_eof_matcher()
|
||||
self._pop_message()
|
||||
# Set the multipart up for newline cleansing, which will
|
||||
# happen if we're in a nested multipart.
|
||||
self._last = self._cur
|
||||
else:
|
||||
# I think we must be in the preamble
|
||||
assert capturing_preamble
|
||||
preamble.append(line)
|
||||
# We've seen either the EOF or the end boundary. If we're still
|
||||
# capturing the preamble, we never saw the start boundary. Note
|
||||
# that as a defect and store the captured text as the payload.
|
||||
# Everything from here to the EOF is epilogue.
|
||||
if capturing_preamble:
|
||||
self._cur.defects.append(errors.StartBoundaryNotFoundDefect())
|
||||
self._cur.set_payload(EMPTYSTRING.join(preamble))
|
||||
epilogue = []
|
||||
for line in self._input:
|
||||
if line is NeedMoreData:
|
||||
yield NeedMoreData
|
||||
continue
|
||||
self._cur.epilogue = EMPTYSTRING.join(epilogue)
|
||||
return
|
||||
# If the end boundary ended in a newline, we'll need to make sure
|
||||
# the epilogue isn't None
|
||||
if linesep:
|
||||
epilogue = ['']
|
||||
else:
|
||||
epilogue = []
|
||||
for line in self._input:
|
||||
if line is NeedMoreData:
|
||||
yield NeedMoreData
|
||||
continue
|
||||
epilogue.append(line)
|
||||
# Any CRLF at the front of the epilogue is not technically part of
|
||||
# the epilogue. Also, watch out for an empty string epilogue,
|
||||
# which means a single newline.
|
||||
if epilogue:
|
||||
firstline = epilogue[0]
|
||||
bolmo = NLCRE_bol.match(firstline)
|
||||
if bolmo:
|
||||
epilogue[0] = firstline[len(bolmo.group(0)):]
|
||||
self._cur.epilogue = EMPTYSTRING.join(epilogue)
|
||||
return
|
||||
# Otherwise, it's some non-multipart type, so the entire rest of the
|
||||
# file contents becomes the payload.
|
||||
lines = []
|
||||
for line in self._input:
|
||||
if line is NeedMoreData:
|
||||
yield NeedMoreData
|
||||
continue
|
||||
lines.append(line)
|
||||
self._cur.set_payload(EMPTYSTRING.join(lines))
|
||||
|
||||
def _parse_headers(self, lines):
|
||||
# Passed a list of lines that make up the headers for the current msg
|
||||
lastheader = ''
|
||||
lastvalue = []
|
||||
for lineno, line in enumerate(lines):
|
||||
# Check for continuation
|
||||
if line[0] in ' \t':
|
||||
if not lastheader:
|
||||
# The first line of the headers was a continuation. This
|
||||
# is illegal, so let's note the defect, store the illegal
|
||||
# line, and ignore it for purposes of headers.
|
||||
defect = errors.FirstHeaderLineIsContinuationDefect(line)
|
||||
self._cur.defects.append(defect)
|
||||
continue
|
||||
lastvalue.append(line)
|
||||
continue
|
||||
if lastheader:
|
||||
# XXX reconsider the joining of folded lines
|
||||
lhdr = EMPTYSTRING.join(lastvalue)[:-1].rstrip('\r\n')
|
||||
self._cur[lastheader] = lhdr
|
||||
lastheader, lastvalue = '', []
|
||||
# Check for envelope header, i.e. unix-from
|
||||
if line.startswith('From '):
|
||||
if lineno == 0:
|
||||
# Strip off the trailing newline
|
||||
mo = NLCRE_eol.search(line)
|
||||
if mo:
|
||||
line = line[:-len(mo.group(0))]
|
||||
self._cur.set_unixfrom(line)
|
||||
continue
|
||||
elif lineno == len(lines) - 1:
|
||||
# Something looking like a unix-from at the end - it's
|
||||
# probably the first line of the body, so push back the
|
||||
# line and stop.
|
||||
self._input.unreadline(line)
|
||||
return
|
||||
else:
|
||||
# Weirdly placed unix-from line. Note this as a defect
|
||||
# and ignore it.
|
||||
defect = errors.MisplacedEnvelopeHeaderDefect(line)
|
||||
self._cur.defects.append(defect)
|
||||
continue
|
||||
# Split the line on the colon separating field name from value.
|
||||
i = line.find(':')
|
||||
if i < 0:
|
||||
defect = errors.MalformedHeaderDefect(line)
|
||||
self._cur.defects.append(defect)
|
||||
continue
|
||||
lastheader = line[:i]
|
||||
lastvalue = [line[i+1:].lstrip()]
|
||||
# Done with all the lines, so handle the last header.
|
||||
if lastheader:
|
||||
# XXX reconsider the joining of folded lines
|
||||
self._cur[lastheader] = EMPTYSTRING.join(lastvalue).rstrip('\r\n')
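
# Editor's illustrative sketch -- not part of the original module.  The sample
# message is invented; it shows _parse_headers() re-joining a folded header.
def _demo_folded_header():
    parser = FeedParser()
    parser.feed('Subject: a header value\r\n'
                '\tthat was folded onto a second line\r\n'
                '\r\n'
                'body\r\n')
    msg = parser.close()
    assert 'folded onto a second line' in msg['Subject']
    assert not msg.defects
    return msg['Subject']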
|
Binary file not shown.
Binary file not shown.
@@ -0,0 +1,372 @@
# Copyright (C) 2001-2010 Python Software Foundation
|
||||
# Contact: email-sig@python.org
|
||||
|
||||
"""Classes to generate plain text from a message object tree."""
|
||||
|
||||
__all__ = ['Generator', 'DecodedGenerator']
|
||||
|
||||
import re
|
||||
import sys
|
||||
import time
|
||||
import random
|
||||
import warnings
|
||||
|
||||
from cStringIO import StringIO
|
||||
from email.header import Header
|
||||
|
||||
UNDERSCORE = '_'
|
||||
NL = '\n'
|
||||
|
||||
fcre = re.compile(r'^From ', re.MULTILINE)
|
||||
|
||||
def _is8bitstring(s):
|
||||
if isinstance(s, str):
|
||||
try:
|
||||
unicode(s, 'us-ascii')
|
||||
except UnicodeError:
|
||||
return True
|
||||
return False
|
||||
|
||||
|
||||
|
||||
class Generator:
|
||||
"""Generates output from a Message object tree.
|
||||
|
||||
This basic generator writes the message to the given file object as plain
|
||||
text.
|
||||
"""
|
||||
#
|
||||
# Public interface
|
||||
#
|
||||
|
||||
def __init__(self, outfp, mangle_from_=True, maxheaderlen=78):
|
||||
"""Create the generator for message flattening.
|
||||
|
||||
outfp is the output file-like object for writing the message to. It
|
||||
must have a write() method.
|
||||
|
||||
Optional mangle_from_ is a flag that, when True (the default), escapes
|
||||
From_ lines in the body of the message by putting a `>' in front of
|
||||
them.
|
||||
|
||||
Optional maxheaderlen specifies the longest length for a non-continued
|
||||
header. When a header line is longer (in characters, with tabs
|
||||
expanded to 8 spaces) than maxheaderlen, the header will split as
|
||||
defined in the Header class. Set maxheaderlen to zero to disable
|
||||
header wrapping. The default is 78, as recommended (but not required)
|
||||
by RFC 2822.
|
||||
"""
|
||||
self._fp = outfp
|
||||
self._mangle_from_ = mangle_from_
|
||||
self._maxheaderlen = maxheaderlen
|
||||
|
||||
def write(self, s):
|
||||
# Just delegate to the file object
|
||||
self._fp.write(s)
|
||||
|
||||
def flatten(self, msg, unixfrom=False):
|
||||
"""Print the message object tree rooted at msg to the output file
|
||||
specified when the Generator instance was created.
|
||||
|
||||
unixfrom is a flag that forces the printing of a Unix From_ delimiter
|
||||
before the first object in the message tree. If the original message
|
||||
has no From_ delimiter, a `standard' one is crafted. By default, this
|
||||
is False to inhibit the printing of any From_ delimiter.
|
||||
|
||||
Note that for subobjects, no From_ line is printed.
|
||||
"""
|
||||
if unixfrom:
|
||||
ufrom = msg.get_unixfrom()
|
||||
if not ufrom:
|
||||
ufrom = 'From nobody ' + time.ctime(time.time())
|
||||
print >> self._fp, ufrom
|
||||
self._write(msg)
|
||||
|
||||
def clone(self, fp):
|
||||
"""Clone this generator with the exact same options."""
|
||||
return self.__class__(fp, self._mangle_from_, self._maxheaderlen)
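
    # Editor's illustrative sketch (not part of this module): flattening a
    # parsed message back to text with the options described above.  'msg' is
    # an assumed email.message.Message instance; StringIO is imported above.
    #
    #     fp = StringIO()
    #     g = Generator(fp, mangle_from_=True, maxheaderlen=60)
    #     g.flatten(msg, unixfrom=True)
    #     text = fp.getvalue()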
|
||||
|
||||
#
|
||||
# Protected interface - undocumented ;/
|
||||
#
|
||||
|
||||
def _write(self, msg):
|
||||
# We can't write the headers yet because of the following scenario:
|
||||
# say a multipart message includes the boundary string somewhere in
|
||||
# its body. We'd have to calculate the new boundary /before/ we write
|
||||
# the headers so that we can write the correct Content-Type:
|
||||
# parameter.
|
||||
#
|
||||
# The way we do this, so as to make the _handle_*() methods simpler,
|
||||
# is to cache any subpart writes into a StringIO. Then we write the
|
||||
# headers and the StringIO contents. That way, subpart handlers can
|
||||
# Do The Right Thing, and can still modify the Content-Type: header if
|
||||
# necessary.
|
||||
oldfp = self._fp
|
||||
try:
|
||||
self._fp = sfp = StringIO()
|
||||
self._dispatch(msg)
|
||||
finally:
|
||||
self._fp = oldfp
|
||||
# Write the headers. First we see if the message object wants to
|
||||
# handle that itself. If not, we'll do it generically.
|
||||
meth = getattr(msg, '_write_headers', None)
|
||||
if meth is None:
|
||||
self._write_headers(msg)
|
||||
else:
|
||||
meth(self)
|
||||
self._fp.write(sfp.getvalue())
|
||||
|
||||
def _dispatch(self, msg):
|
||||
# Get the Content-Type: for the message, then try to dispatch to
|
||||
# self._handle_<maintype>_<subtype>(). If there's no handler for the
|
||||
# full MIME type, then dispatch to self._handle_<maintype>(). If
|
||||
# that's missing too, then dispatch to self._writeBody().
|
||||
main = msg.get_content_maintype()
|
||||
sub = msg.get_content_subtype()
|
||||
specific = UNDERSCORE.join((main, sub)).replace('-', '_')
|
||||
meth = getattr(self, '_handle_' + specific, None)
|
||||
if meth is None:
|
||||
generic = main.replace('-', '_')
|
||||
meth = getattr(self, '_handle_' + generic, None)
|
||||
if meth is None:
|
||||
meth = self._writeBody
|
||||
meth(msg)

    #
    # Default handlers
    #

    def _write_headers(self, msg):
        for h, v in msg.items():
            print >> self._fp, '%s:' % h,
            if self._maxheaderlen == 0:
                # Explicit no-wrapping
                print >> self._fp, v
            elif isinstance(v, Header):
                # Header instances know what to do
                print >> self._fp, v.encode()
            elif _is8bitstring(v):
                # If we have raw 8bit data in a byte string, we have no idea
                # what the encoding is. There is no safe way to split this
                # string. If it's ascii-subset, then we could do a normal
                # ascii split, but if it's multibyte then we could break the
                # string. There's no way to know so the least harm seems to
                # be to not split the string and risk it being too long.
                print >> self._fp, v
            else:
                # Header's got lots of smarts, so use it. Note that this is
                # fundamentally broken though because we lose idempotency when
                # the header string is continued with tabs. It will now be
                # continued with spaces. This was reversedly broken before we
                # fixed bug 1974. Either way, we lose.
                print >> self._fp, Header(
                    v, maxlinelen=self._maxheaderlen, header_name=h).encode()
        # A blank line always separates headers from body
        print >> self._fp

    #
    # Handlers for writing types and subtypes
    #

    def _handle_text(self, msg):
        payload = msg.get_payload()
        if payload is None:
            return
        if not isinstance(payload, basestring):
            raise TypeError('string payload expected: %s' % type(payload))
        if self._mangle_from_:
            payload = fcre.sub('>From ', payload)
        self._fp.write(payload)

    # Default body handler
    _writeBody = _handle_text

    def _handle_multipart(self, msg):
        # The trick here is to write out each part separately, merge them all
        # together, and then make sure that the boundary we've chosen isn't
        # present in the payload.
        msgtexts = []
        subparts = msg.get_payload()
        if subparts is None:
            subparts = []
        elif isinstance(subparts, basestring):
            # e.g. a non-strict parse of a message with no starting boundary.
            self._fp.write(subparts)
            return
        elif not isinstance(subparts, list):
            # Scalar payload
            subparts = [subparts]
        for part in subparts:
            s = StringIO()
            g = self.clone(s)
            g.flatten(part, unixfrom=False)
            msgtexts.append(s.getvalue())
        # BAW: What about boundaries that are wrapped in double-quotes?
        boundary = msg.get_boundary()
        if not boundary:
            # Create a boundary that doesn't appear in any of the
            # message texts.
            alltext = NL.join(msgtexts)
            boundary = _make_boundary(alltext)
            msg.set_boundary(boundary)
        # If there's a preamble, write it out, with a trailing CRLF
        if msg.preamble is not None:
            if self._mangle_from_:
                preamble = fcre.sub('>From ', msg.preamble)
            else:
                preamble = msg.preamble
            print >> self._fp, preamble
        # dash-boundary transport-padding CRLF
        print >> self._fp, '--' + boundary
        # body-part
        if msgtexts:
            self._fp.write(msgtexts.pop(0))
        # *encapsulation
        # --> delimiter transport-padding
        # --> CRLF body-part
        for body_part in msgtexts:
            # delimiter transport-padding CRLF
            print >> self._fp, '\n--' + boundary
            # body-part
            self._fp.write(body_part)
        # close-delimiter transport-padding
        self._fp.write('\n--' + boundary + '--')
        if msg.epilogue is not None:
            print >> self._fp
            if self._mangle_from_:
                epilogue = fcre.sub('>From ', msg.epilogue)
            else:
                epilogue = msg.epilogue
            self._fp.write(epilogue)
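
Illustrative usage (a sketch, not part of the original module): flattening a multipart message exercises the boundary handling above; when no boundary parameter is present, one is generated and written back onto the message.

# Illustrative sketch, not in the original source.
from cStringIO import StringIO
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.generator import Generator

outer = MIMEMultipart()
outer.attach(MIMEText('first part'))
outer.attach(MIMEText('second part'))
fp = StringIO()
Generator(fp).flatten(outer)
# The generated boundary now appears both in the header and in the body.
assert outer.get_boundary() in fp.getvalue()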

    def _handle_multipart_signed(self, msg):
        # The contents of signed parts has to stay unmodified in order to keep
        # the signature intact per RFC1847 2.1, so we disable header wrapping.
        # RDM: This isn't enough to completely preserve the part, but it helps.
        old_maxheaderlen = self._maxheaderlen
        try:
            self._maxheaderlen = 0
            self._handle_multipart(msg)
        finally:
            self._maxheaderlen = old_maxheaderlen

    def _handle_message_delivery_status(self, msg):
        # We can't just write the headers directly to self's file object
        # because this will leave an extra newline between the last header
        # block and the boundary. Sigh.
        blocks = []
        for part in msg.get_payload():
            s = StringIO()
            g = self.clone(s)
            g.flatten(part, unixfrom=False)
            text = s.getvalue()
            lines = text.split('\n')
            # Strip off the unnecessary trailing empty line
            if lines and lines[-1] == '':
                blocks.append(NL.join(lines[:-1]))
            else:
                blocks.append(text)
        # Now join all the blocks with an empty line. This has the lovely
        # effect of separating each block with an empty line, but not adding
        # an extra one after the last one.
        self._fp.write(NL.join(blocks))

    def _handle_message(self, msg):
        s = StringIO()
        g = self.clone(s)
        # The payload of a message/rfc822 part should be a multipart sequence
        # of length 1. The zeroth element of the list should be the Message
        # object for the subpart. Extract that object, stringify it, and
        # write it out.
        # Except, it turns out, when it's a string instead, which happens when
        # and only when HeaderParser is used on a message of mime type
        # message/rfc822. Such messages are generated by, for example,
        # Groupwise when forwarding unadorned messages. (Issue 7970.) So
        # in that case we just emit the string body.
        payload = msg.get_payload()
        if isinstance(payload, list):
            g.flatten(msg.get_payload(0), unixfrom=False)
            payload = s.getvalue()
        self._fp.write(payload)


_FMT = '[Non-text (%(type)s) part of message omitted, filename %(filename)s]'


class DecodedGenerator(Generator):
    """Generates a text representation of a message.

    Like the Generator base class, except that non-text parts are substituted
    with a format string representing the part.
    """
    def __init__(self, outfp, mangle_from_=True, maxheaderlen=78, fmt=None):
        """Like Generator.__init__() except that an additional optional
        argument is allowed.

        Walks through all subparts of a message. If the subpart is of main
        type `text', then it prints the decoded payload of the subpart.

        Otherwise, fmt is a format string that is used instead of the message
        payload. fmt is expanded with the following keywords (in
        %(keyword)s format):

        type       : Full MIME type of the non-text part
        maintype   : Main MIME type of the non-text part
        subtype    : Sub-MIME type of the non-text part
        filename   : Filename of the non-text part
        description: Description associated with the non-text part
        encoding   : Content transfer encoding of the non-text part

        The default value for fmt is None, meaning

        [Non-text (%(type)s) part of message omitted, filename %(filename)s]
        """
        Generator.__init__(self, outfp, mangle_from_, maxheaderlen)
        if fmt is None:
            self._fmt = _FMT
        else:
            self._fmt = fmt

    def _dispatch(self, msg):
        for part in msg.walk():
            maintype = part.get_content_maintype()
            if maintype == 'text':
                print >> self, part.get_payload(decode=True)
            elif maintype == 'multipart':
                # Just skip this
                pass
            else:
                print >> self, self._fmt % {
                    'type'       : part.get_content_type(),
                    'maintype'   : part.get_content_maintype(),
                    'subtype'    : part.get_content_subtype(),
                    'filename'   : part.get_filename('[no filename]'),
                    'description': part.get('Content-Description',
                                            '[no description]'),
                    'encoding'   : part.get('Content-Transfer-Encoding',
                                            '[no encoding]'),
                    }


# Helper
_width = len(repr(sys.maxint-1))
_fmt = '%%0%dd' % _width

def _make_boundary(text=None):
    # Craft a random boundary. If text is given, ensure that the chosen
    # boundary doesn't appear in the text.
    token = random.randrange(sys.maxint)
    boundary = ('=' * 15) + (_fmt % token) + '=='
    if text is None:
        return boundary
    b = boundary
    counter = 0
    while True:
        cre = re.compile('^--' + re.escape(b) + '(--)?$', re.MULTILINE)
        if not cre.search(text):
            break
        b = boundary + '.' + str(counter)
        counter += 1
    return b
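
Illustrative usage (a sketch, not part of the original module; _make_boundary is a private helper): the loop above keeps appending a counter suffix until the candidate no longer matches any delimiter line in the given text.

# Illustrative sketch, not in the original source.
import re
from email.generator import _make_boundary

text = 'body text\n--not-a-real-delimiter--\nmore text'
b = _make_boundary(text)
# The chosen boundary never collides with an existing "--boundary" line.
assert not re.search('^--' + re.escape(b) + '(--)?$', text, re.MULTILINE)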
Binary file not shown.
Binary file not shown.
|
@ -0,0 +1,514 @@
|
|||
# Copyright (C) 2002-2006 Python Software Foundation
|
||||
# Author: Ben Gertzfield, Barry Warsaw
|
||||
# Contact: email-sig@python.org
|
||||
|
||||
"""Header encoding and decoding functionality."""
|
||||
|
||||
__all__ = [
|
||||
'Header',
|
||||
'decode_header',
|
||||
'make_header',
|
||||
]
|
||||
|
||||
import re
|
||||
import binascii
|
||||
|
||||
import email.quoprimime
|
||||
import email.base64mime
|
||||
|
||||
from email.errors import HeaderParseError
|
||||
from email.charset import Charset
|
||||
|
||||
NL = '\n'
|
||||
SPACE = ' '
|
||||
USPACE = u' '
|
||||
SPACE8 = ' ' * 8
|
||||
UEMPTYSTRING = u''
|
||||
|
||||
MAXLINELEN = 76
|
||||
|
||||
USASCII = Charset('us-ascii')
|
||||
UTF8 = Charset('utf-8')
|
||||
|
||||
# Match encoded-word strings in the form =?charset?q?Hello_World?=
|
||||
ecre = re.compile(r'''
|
||||
=\? # literal =?
|
||||
(?P<charset>[^?]*?) # non-greedy up to the next ? is the charset
|
||||
\? # literal ?
|
||||
(?P<encoding>[qb]) # either a "q" or a "b", case insensitive
|
||||
\? # literal ?
|
||||
(?P<encoded>.*?) # non-greedy up to the next ?= is the encoded string
|
||||
\?= # literal ?=
|
||||
(?=[ \t]|$) # whitespace or the end of the string
|
||||
''', re.VERBOSE | re.IGNORECASE | re.MULTILINE)
|
||||
|
||||
# Field name regexp, including trailing colon, but not separating whitespace,
|
||||
# according to RFC 2822. Character range is from tilde to exclamation mark.
|
||||
# For use with .match()
|
||||
fcre = re.compile(r'[\041-\176]+:$')
|
||||
|
||||
# Find a header embedded in a putative header value. Used to check for
|
||||
# header injection attack.
|
||||
_embeded_header = re.compile(r'\n[^ \t]+:')
|
||||
|
||||
|
||||
|
||||
# Helpers
|
||||
_max_append = email.quoprimime._max_append
|
||||
|
||||
|
||||
|
||||
def decode_header(header):
|
||||
"""Decode a message header value without converting charset.
|
||||
|
||||
Returns a list of (decoded_string, charset) pairs containing each of the
|
||||
decoded parts of the header. Charset is None for non-encoded parts of the
|
||||
header, otherwise a lower-case string containing the name of the character
|
||||
set specified in the encoded string.
|
||||
|
||||
An email.errors.HeaderParseError may be raised when a decoding error
|
||||
occurs (e.g. a base64 decoding exception).
|
||||
"""
|
||||
# If no encoding, just return the header
|
||||
header = str(header)
|
||||
if not ecre.search(header):
|
||||
return [(header, None)]
|
||||
decoded = []
|
||||
dec = ''
|
||||
for line in header.splitlines():
|
||||
# This line might not have an encoding in it
|
||||
if not ecre.search(line):
|
||||
decoded.append((line, None))
|
||||
continue
|
||||
parts = ecre.split(line)
|
||||
while parts:
|
||||
unenc = parts.pop(0).strip()
|
||||
if unenc:
|
||||
# Should we continue a long line?
|
||||
if decoded and decoded[-1][1] is None:
|
||||
decoded[-1] = (decoded[-1][0] + SPACE + unenc, None)
|
||||
else:
|
||||
decoded.append((unenc, None))
|
||||
if parts:
|
||||
charset, encoding = [s.lower() for s in parts[0:2]]
|
||||
encoded = parts[2]
|
||||
dec = None
|
||||
if encoding == 'q':
|
||||
dec = email.quoprimime.header_decode(encoded)
|
||||
elif encoding == 'b':
|
||||
paderr = len(encoded) % 4 # Postel's law: add missing padding
|
||||
if paderr:
|
||||
encoded += '==='[:4 - paderr]
|
||||
try:
|
||||
dec = email.base64mime.decode(encoded)
|
||||
except binascii.Error:
|
||||
# Turn this into a higher level exception. BAW: Right
|
||||
# now we throw the lower level exception away but
|
||||
# when/if we get exception chaining, we'll preserve it.
|
||||
raise HeaderParseError
|
||||
if dec is None:
|
||||
dec = encoded
|
||||
|
||||
if decoded and decoded[-1][1] == charset:
|
||||
decoded[-1] = (decoded[-1][0] + dec, decoded[-1][1])
|
||||
else:
|
||||
decoded.append((dec, charset))
|
||||
del parts[0:3]
|
||||
return decoded
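
Illustrative usage (a sketch, not part of the original module): an RFC 2047 encoded header decodes into (string, charset) pairs, with None marking unencoded runs.

# Illustrative sketch, not in the original source.
from email.header import decode_header

pairs = decode_header('=?iso-8859-1?q?p=F6stal?= mail')
# pairs == [('p\xf6stal', 'iso-8859-1'), ('mail', None)]
text = u' '.join(s.decode(cs) if cs else unicode(s) for s, cs in pairs)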
|
||||
|
||||
|
||||
|
||||
def make_header(decoded_seq, maxlinelen=None, header_name=None,
|
||||
continuation_ws=' '):
|
||||
"""Create a Header from a sequence of pairs as returned by decode_header()
|
||||
|
||||
decode_header() takes a header value string and returns a sequence of
|
||||
pairs of the format (decoded_string, charset) where charset is the string
|
||||
name of the character set.
|
||||
|
||||
This function takes one of those sequence of pairs and returns a Header
|
||||
instance. Optional maxlinelen, header_name, and continuation_ws are as in
|
||||
the Header constructor.
|
||||
"""
|
||||
h = Header(maxlinelen=maxlinelen, header_name=header_name,
|
||||
continuation_ws=continuation_ws)
|
||||
for s, charset in decoded_seq:
|
||||
# None means us-ascii but we can simply pass it on to h.append()
|
||||
if charset is not None and not isinstance(charset, Charset):
|
||||
charset = Charset(charset)
|
||||
h.append(s, charset)
|
||||
return h
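
Illustrative usage (a sketch, not part of the original module): a decode_header()/make_header() round trip rebuilds a Header that can be re-encoded for a header field.

# Illustrative sketch, not in the original source.
from email.header import decode_header, make_header

raw = '=?utf-8?b?w6Rnw6Rp?= and plain text'
h = make_header(decode_header(raw))
unicode(h)   # decoded text as a unicode string
h.encode()   # folded RFC 2047 value, ready for a header field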
|
||||
|
||||
|
||||
|
||||
class Header:
|
||||
def __init__(self, s=None, charset=None,
|
||||
maxlinelen=None, header_name=None,
|
||||
continuation_ws=' ', errors='strict'):
|
||||
"""Create a MIME-compliant header that can contain many character sets.
|
||||
|
||||
Optional s is the initial header value. If None, the initial header
|
||||
value is not set. You can later append to the header with .append()
|
||||
method calls. s may be a byte string or a Unicode string, but see the
|
||||
.append() documentation for semantics.
|
||||
|
||||
Optional charset serves two purposes: it has the same meaning as the
|
||||
charset argument to the .append() method. It also sets the default
|
||||
character set for all subsequent .append() calls that omit the charset
|
||||
argument. If charset is not provided in the constructor, the us-ascii
|
||||
charset is used both as s's initial charset and as the default for
|
||||
subsequent .append() calls.
|
||||
|
||||
The maximum line length can be specified explicit via maxlinelen. For
|
||||
splitting the first line to a shorter value (to account for the field
|
||||
header which isn't included in s, e.g. `Subject') pass in the name of
|
||||
the field in header_name. The default maxlinelen is 76.
|
||||
|
||||
continuation_ws must be RFC 2822 compliant folding whitespace (usually
|
||||
either a space or a hard tab) which will be prepended to continuation
|
||||
lines.
|
||||
|
||||
errors is passed through to the .append() call.
|
||||
"""
|
||||
if charset is None:
|
||||
charset = USASCII
|
||||
if not isinstance(charset, Charset):
|
||||
charset = Charset(charset)
|
||||
self._charset = charset
|
||||
self._continuation_ws = continuation_ws
|
||||
cws_expanded_len = len(continuation_ws.replace('\t', SPACE8))
|
||||
# BAW: I believe `chunks' and `maxlinelen' should be non-public.
|
||||
self._chunks = []
|
||||
if s is not None:
|
||||
self.append(s, charset, errors)
|
||||
if maxlinelen is None:
|
||||
maxlinelen = MAXLINELEN
|
||||
if header_name is None:
|
||||
# We don't know anything about the field header so the first line
|
||||
# is the same length as subsequent lines.
|
||||
self._firstlinelen = maxlinelen
|
||||
else:
|
||||
# The first line should be shorter to take into account the field
|
||||
# header. Also subtract off 2 extra for the colon and space.
|
||||
self._firstlinelen = maxlinelen - len(header_name) - 2
|
||||
# Second and subsequent lines should subtract off the length in
|
||||
# columns of the continuation whitespace prefix.
|
||||
self._maxlinelen = maxlinelen - cws_expanded_len
|
||||
|
||||
def __str__(self):
|
||||
"""A synonym for self.encode()."""
|
||||
return self.encode()
|
||||
|
||||
def __unicode__(self):
|
||||
"""Helper for the built-in unicode function."""
|
||||
uchunks = []
|
||||
lastcs = None
|
||||
for s, charset in self._chunks:
|
||||
# We must preserve spaces between encoded and non-encoded word
|
||||
# boundaries, which means for us we need to add a space when we go
|
||||
# from a charset to None/us-ascii, or from None/us-ascii to a
|
||||
# charset. Only do this for the second and subsequent chunks.
|
||||
nextcs = charset
|
||||
if uchunks:
|
||||
if lastcs not in (None, 'us-ascii'):
|
||||
if nextcs in (None, 'us-ascii'):
|
||||
uchunks.append(USPACE)
|
||||
nextcs = None
|
||||
elif nextcs not in (None, 'us-ascii'):
|
||||
uchunks.append(USPACE)
|
||||
lastcs = nextcs
|
||||
uchunks.append(unicode(s, str(charset)))
|
||||
return UEMPTYSTRING.join(uchunks)
|
||||
|
||||
# Rich comparison operators for equality only. BAW: does it make sense to
|
||||
# have or explicitly disable <, <=, >, >= operators?
|
||||
def __eq__(self, other):
|
||||
# other may be a Header or a string. Both are fine so coerce
|
||||
# ourselves to a string, swap the args and do another comparison.
|
||||
return other == self.encode()
|
||||
|
||||
def __ne__(self, other):
|
||||
return not self == other
|
||||
|
||||
def append(self, s, charset=None, errors='strict'):
|
||||
"""Append a string to the MIME header.
|
||||
|
||||
Optional charset, if given, should be a Charset instance or the name
|
||||
of a character set (which will be converted to a Charset instance). A
|
||||
value of None (the default) means that the charset given in the
|
||||
constructor is used.
|
||||
|
||||
s may be a byte string or a Unicode string. If it is a byte string
|
||||
(i.e. isinstance(s, str) is true), then charset is the encoding of
|
||||
that byte string, and a UnicodeError will be raised if the string
|
||||
cannot be decoded with that charset. If s is a Unicode string, then
|
||||
charset is a hint specifying the character set of the characters in
|
||||
the string. In this case, when producing an RFC 2822 compliant header
|
||||
using RFC 2047 rules, the Unicode string will be encoded using the
|
||||
following charsets in order: us-ascii, the charset hint, utf-8. The
|
||||
first character set not to provoke a UnicodeError is used.
|
||||
|
||||
Optional `errors' is passed as the third argument to any unicode() or
|
||||
ustr.encode() call.
|
||||
"""
|
||||
if charset is None:
|
||||
charset = self._charset
|
||||
elif not isinstance(charset, Charset):
|
||||
charset = Charset(charset)
|
||||
# If the charset is our faux 8bit charset, leave the string unchanged
|
||||
if charset != '8bit':
|
||||
# We need to test that the string can be converted to unicode and
|
||||
# back to a byte string, given the input and output codecs of the
|
||||
# charset.
|
||||
if isinstance(s, str):
|
||||
# Possibly raise UnicodeError if the byte string can't be
|
||||
# converted to a unicode with the input codec of the charset.
|
||||
incodec = charset.input_codec or 'us-ascii'
|
||||
ustr = unicode(s, incodec, errors)
|
||||
# Now make sure that the unicode could be converted back to a
|
||||
# byte string with the output codec, which may be different
|
||||
# than the input codec. Still, use the original byte string.
|
||||
outcodec = charset.output_codec or 'us-ascii'
|
||||
ustr.encode(outcodec, errors)
|
||||
elif isinstance(s, unicode):
|
||||
# Now we have to be sure the unicode string can be converted
|
||||
# to a byte string with a reasonable output codec. We want to
|
||||
# use the byte string in the chunk.
|
||||
for charset in USASCII, charset, UTF8:
|
||||
try:
|
||||
outcodec = charset.output_codec or 'us-ascii'
|
||||
s = s.encode(outcodec, errors)
|
||||
break
|
||||
except UnicodeError:
|
||||
pass
|
||||
else:
|
||||
assert False, 'utf-8 conversion failed'
|
||||
self._chunks.append((s, charset))
|
||||
|
||||
def _split(self, s, charset, maxlinelen, splitchars):
|
||||
# Split up a header safely for use with encode_chunks.
|
||||
splittable = charset.to_splittable(s)
|
||||
encoded = charset.from_splittable(splittable, True)
|
||||
elen = charset.encoded_header_len(encoded)
|
||||
# If the line's encoded length already fits, just return it
|
||||
if elen <= maxlinelen:
|
||||
return [(encoded, charset)]
|
||||
# If we have undetermined raw 8bit characters sitting in a byte
|
||||
# string, we really don't know what the right thing to do is. We
|
||||
# can't really split it because it might be multibyte data which we
|
||||
# could break if we split it between pairs. The least harm seems to
|
||||
# be to not split the header at all, but that means they could go out
|
||||
# longer than maxlinelen.
|
||||
if charset == '8bit':
|
||||
return [(s, charset)]
|
||||
# BAW: I'm not sure what the right test here is. What we're trying to
|
||||
# do is be faithful to RFC 2822's recommendation that ($2.2.3):
|
||||
#
|
||||
# "Note: Though structured field bodies are defined in such a way that
|
||||
# folding can take place between many of the lexical tokens (and even
|
||||
# within some of the lexical tokens), folding SHOULD be limited to
|
||||
# placing the CRLF at higher-level syntactic breaks."
|
||||
#
|
||||
# For now, I can only imagine doing this when the charset is us-ascii,
|
||||
# although it's possible that other charsets may also benefit from the
|
||||
# higher-level syntactic breaks.
|
||||
elif charset == 'us-ascii':
|
||||
return self._split_ascii(s, charset, maxlinelen, splitchars)
|
||||
# BAW: should we use encoded?
|
||||
elif elen == len(s):
|
||||
# We can split on _maxlinelen boundaries because we know that the
|
||||
# encoding won't change the size of the string
|
||||
splitpnt = maxlinelen
|
||||
first = charset.from_splittable(splittable[:splitpnt], False)
|
||||
last = charset.from_splittable(splittable[splitpnt:], False)
|
||||
else:
|
||||
# Binary search for split point
|
||||
first, last = _binsplit(splittable, charset, maxlinelen)
|
||||
# first is of the proper length so just wrap it in the appropriate
|
||||
# chrome. last must be recursively split.
|
||||
fsplittable = charset.to_splittable(first)
|
||||
fencoded = charset.from_splittable(fsplittable, True)
|
||||
chunk = [(fencoded, charset)]
|
||||
return chunk + self._split(last, charset, self._maxlinelen, splitchars)
|
||||
|
||||
def _split_ascii(self, s, charset, firstlen, splitchars):
|
||||
chunks = _split_ascii(s, firstlen, self._maxlinelen,
|
||||
self._continuation_ws, splitchars)
|
||||
return zip(chunks, [charset]*len(chunks))
|
||||
|
||||
def _encode_chunks(self, newchunks, maxlinelen):
|
||||
# MIME-encode a header with many different charsets and/or encodings.
|
||||
#
|
||||
# Given a list of pairs (string, charset), return a MIME-encoded
|
||||
# string suitable for use in a header field. Each pair may have
|
||||
# different charsets and/or encodings, and the resulting header will
|
||||
# accurately reflect each setting.
|
||||
#
|
||||
# Each encoding can be email.utils.QP (quoted-printable, for
|
||||
# ASCII-like character sets like iso-8859-1), email.utils.BASE64
|
||||
# (Base64, for non-ASCII like character sets like KOI8-R and
|
||||
# iso-2022-jp), or None (no encoding).
|
||||
#
|
||||
# Each pair will be represented on a separate line; the resulting
|
||||
# string will be in the format:
|
||||
#
|
||||
# =?charset1?q?Mar=EDa_Gonz=E1lez_Alonso?=\n
|
||||
# =?charset2?b?SvxyZ2VuIEL2aW5n?="
|
||||
chunks = []
|
||||
for header, charset in newchunks:
|
||||
if not header:
|
||||
continue
|
||||
if charset is None or charset.header_encoding is None:
|
||||
s = header
|
||||
else:
|
||||
s = charset.header_encode(header)
|
||||
# Don't add more folding whitespace than necessary
|
||||
if chunks and chunks[-1].endswith(' '):
|
||||
extra = ''
|
||||
else:
|
||||
extra = ' '
|
||||
_max_append(chunks, s, maxlinelen, extra)
|
||||
joiner = NL + self._continuation_ws
|
||||
return joiner.join(chunks)
|
||||
|
||||
def encode(self, splitchars=';, '):
|
||||
"""Encode a message header into an RFC-compliant format.
|
||||
|
||||
There are many issues involved in converting a given string for use in
|
||||
an email header. Only certain character sets are readable in most
|
||||
email clients, and as header strings can only contain a subset of
|
||||
7-bit ASCII, care must be taken to properly convert and encode (with
|
||||
Base64 or quoted-printable) header strings. In addition, there is a
|
||||
75-character length limit on any given encoded header field, so
|
||||
line-wrapping must be performed, even with double-byte character sets.
|
||||
|
||||
This method will do its best to convert the string to the correct
|
||||
character set used in email, and encode and line wrap it safely with
|
||||
the appropriate scheme for that character set.
|
||||
|
||||
If the given charset is not known or an error occurs during
|
||||
conversion, this function will return the header untouched.
|
||||
|
||||
Optional splitchars is a string containing characters to split long
|
||||
ASCII lines on, in rough support of RFC 2822's `highest level
|
||||
syntactic breaks'. This doesn't affect RFC 2047 encoded lines.
|
||||
"""
|
||||
newchunks = []
|
||||
maxlinelen = self._firstlinelen
|
||||
lastlen = 0
|
||||
for s, charset in self._chunks:
|
||||
# The first bit of the next chunk should be just long enough to
|
||||
# fill the next line. Don't forget the space separating the
|
||||
# encoded words.
|
||||
targetlen = maxlinelen - lastlen - 1
|
||||
if targetlen < charset.encoded_header_len(''):
|
||||
# Stick it on the next line
|
||||
targetlen = maxlinelen
|
||||
newchunks += self._split(s, charset, targetlen, splitchars)
|
||||
lastchunk, lastcharset = newchunks[-1]
|
||||
lastlen = lastcharset.encoded_header_len(lastchunk)
|
||||
value = self._encode_chunks(newchunks, maxlinelen)
|
||||
if _embeded_header.search(value):
|
||||
raise HeaderParseError("header value appears to contain "
|
||||
"an embedded header: {!r}".format(value))
|
||||
return value
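
Illustrative usage (a sketch, not part of the original module): chunks appended with different charsets are encoded independently and folded together by encode().

# Illustrative sketch, not in the original source.
from email.header import Header

h = Header('Greetings ', 'us-ascii', header_name='Subject')
h.append(u'Gr\xfc\xdfe', 'iso-8859-1')                      # quoted-printable
h.append(u'\u3053\u3093\u306b\u3061\u306f', 'iso-2022-jp')  # base64
subject_value = h.encode()   # folded, RFC 2047 compliant header value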
|
||||
|
||||
|
||||
|
||||
def _split_ascii(s, firstlen, restlen, continuation_ws, splitchars):
|
||||
lines = []
|
||||
maxlen = firstlen
|
||||
for line in s.splitlines():
|
||||
# Ignore any leading whitespace (i.e. continuation whitespace) already
|
||||
# on the line, since we'll be adding our own.
|
||||
line = line.lstrip()
|
||||
if len(line) < maxlen:
|
||||
lines.append(line)
|
||||
maxlen = restlen
|
||||
continue
|
||||
# Attempt to split the line at the highest-level syntactic break
|
||||
# possible. Note that we don't have a lot of smarts about field
|
||||
# syntax; we just try to break on semi-colons, then commas, then
|
||||
# whitespace.
|
||||
for ch in splitchars:
|
||||
if ch in line:
|
||||
break
|
||||
else:
|
||||
# There's nothing useful to split the line on, not even spaces, so
|
||||
# just append this line unchanged
|
||||
lines.append(line)
|
||||
maxlen = restlen
|
||||
continue
|
||||
# Now split the line on the character plus trailing whitespace
|
||||
cre = re.compile(r'%s\s*' % ch)
|
||||
if ch in ';,':
|
||||
eol = ch
|
||||
else:
|
||||
eol = ''
|
||||
joiner = eol + ' '
|
||||
joinlen = len(joiner)
|
||||
wslen = len(continuation_ws.replace('\t', SPACE8))
|
||||
this = []
|
||||
linelen = 0
|
||||
for part in cre.split(line):
|
||||
curlen = linelen + max(0, len(this)-1) * joinlen
|
||||
partlen = len(part)
|
||||
onfirstline = not lines
|
||||
# We don't want to split after the field name, if we're on the
|
||||
# first line and the field name is present in the header string.
|
||||
if ch == ' ' and onfirstline and \
|
||||
len(this) == 1 and fcre.match(this[0]):
|
||||
this.append(part)
|
||||
linelen += partlen
|
||||
elif curlen + partlen > maxlen:
|
||||
if this:
|
||||
lines.append(joiner.join(this) + eol)
|
||||
# If this part is longer than maxlen and we aren't already
|
||||
# splitting on whitespace, try to recursively split this line
|
||||
# on whitespace.
|
||||
if partlen > maxlen and ch != ' ':
|
||||
subl = _split_ascii(part, maxlen, restlen,
|
||||
continuation_ws, ' ')
|
||||
lines.extend(subl[:-1])
|
||||
this = [subl[-1]]
|
||||
else:
|
||||
this = [part]
|
||||
linelen = wslen + len(this[-1])
|
||||
maxlen = restlen
|
||||
else:
|
||||
this.append(part)
|
||||
linelen += partlen
|
||||
# Put any left over parts on a line by themselves
|
||||
if this:
|
||||
lines.append(joiner.join(this))
|
||||
return lines
|
||||
|
||||
|
||||
|
||||
def _binsplit(splittable, charset, maxlinelen):
|
||||
i = 0
|
||||
j = len(splittable)
|
||||
while i < j:
|
||||
# Invariants:
|
||||
# 1. splittable[:k] fits for all k <= i (note that we *assume*,
|
||||
# at the start, that splittable[:0] fits).
|
||||
# 2. splittable[:k] does not fit for any k > j (at the start,
|
||||
# this means we shouldn't look at any k > len(splittable)).
|
||||
# 3. We don't know about splittable[:k] for k in i+1..j.
|
||||
# 4. We want to set i to the largest k that fits, with i <= k <= j.
|
||||
#
|
||||
m = (i+j+1) >> 1 # ceiling((i+j)/2); i < m <= j
|
||||
chunk = charset.from_splittable(splittable[:m], True)
|
||||
chunklen = charset.encoded_header_len(chunk)
|
||||
if chunklen <= maxlinelen:
|
||||
# m is acceptable, so is a new lower bound.
|
||||
i = m
|
||||
else:
|
||||
# m is not acceptable, so final i must be < m.
|
||||
j = m - 1
|
||||
# i == j. Invariant #1 implies that splittable[:i] fits, and
|
||||
# invariant #2 implies that splittable[:i+1] does not fit, so i
|
||||
# is what we're looking for.
|
||||
first = charset.from_splittable(splittable[:i], False)
|
||||
last = charset.from_splittable(splittable[i:], False)
|
||||
return first, last
|
Binary file not shown.
Binary file not shown.
|
@ -0,0 +1,73 @@
|
|||
# Copyright (C) 2001-2006 Python Software Foundation
# Author: Barry Warsaw
# Contact: email-sig@python.org

"""Various types of useful iterators and generators."""

__all__ = [
    'body_line_iterator',
    'typed_subpart_iterator',
    'walk',
    # Do not include _structure() since it's part of the debugging API.
    ]

import sys
from cStringIO import StringIO


# This function will become a method of the Message class
def walk(self):
    """Walk over the message tree, yielding each subpart.

    The walk is performed in depth-first order. This method is a
    generator.
    """
    yield self
    if self.is_multipart():
        for subpart in self.get_payload():
            for subsubpart in subpart.walk():
                yield subsubpart


# These two functions are imported into the Iterators.py interface module.
def body_line_iterator(msg, decode=False):
    """Iterate over the parts, returning string payloads line-by-line.

    Optional decode (default False) is passed through to .get_payload().
    """
    for subpart in msg.walk():
        payload = subpart.get_payload(decode=decode)
        if isinstance(payload, basestring):
            for line in StringIO(payload):
                yield line


def typed_subpart_iterator(msg, maintype='text', subtype=None):
    """Iterate over the subparts with a given MIME type.

    Use `maintype' as the main MIME type to match against; this defaults to
    "text". Optional `subtype' is the MIME subtype to match against; if
    omitted, only the main type is matched.
    """
    for subpart in msg.walk():
        if subpart.get_content_maintype() == maintype:
            if subtype is None or subpart.get_content_subtype() == subtype:
                yield subpart


def _structure(msg, fp=None, level=0, include_default=False):
    """A handy debugging aid"""
    if fp is None:
        fp = sys.stdout
    tab = ' ' * (level * 4)
    print >> fp, tab + msg.get_content_type(),
    if include_default:
        print >> fp, '[%s]' % msg.get_default_type()
    else:
        print >> fp
    if msg.is_multipart():
        for subpart in msg.get_payload():
            _structure(subpart, fp, level+1, include_default)
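
Illustrative usage (a sketch, not part of the original module; the file path is a placeholder): _structure() prints the MIME tree and typed_subpart_iterator() picks out matching leaves.

# Illustrative sketch, not in the original source.
import email
from email.iterators import typed_subpart_iterator, _structure

msg = email.message_from_string(open('sample.eml').read())   # placeholder path
_structure(msg)                           # dump the content-type tree
for part in typed_subpart_iterator(msg, 'text', 'plain'):
    print part.get_payload(decode=True)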
Binary file not shown.
Binary file not shown.
|
@ -0,0 +1,797 @@
|
|||
# Copyright (C) 2001-2006 Python Software Foundation
|
||||
# Author: Barry Warsaw
|
||||
# Contact: email-sig@python.org
|
||||
|
||||
"""Basic message object for the email package object model."""
|
||||
|
||||
__all__ = ['Message']
|
||||
|
||||
import re
|
||||
import uu
|
||||
import binascii
|
||||
import warnings
|
||||
from cStringIO import StringIO
|
||||
|
||||
# Intrapackage imports
|
||||
import email.charset
|
||||
from email import utils
|
||||
from email import errors
|
||||
|
||||
SEMISPACE = '; '
|
||||
|
||||
# Regular expression that matches `special' characters in parameters, the
|
||||
# existence of which force quoting of the parameter value.
|
||||
tspecials = re.compile(r'[ \(\)<>@,;:\\"/\[\]\?=]')
|
||||
|
||||
|
||||
# Helper functions
|
||||
def _splitparam(param):
|
||||
# Split header parameters. BAW: this may be too simple. It isn't
|
||||
# strictly RFC 2045 (section 5.1) compliant, but it catches most headers
|
||||
# found in the wild. We may eventually need a full-fledged parser.
|
||||
a, sep, b = param.partition(';')
|
||||
if not sep:
|
||||
return a.strip(), None
|
||||
return a.strip(), b.strip()
|
||||
|
||||
def _formatparam(param, value=None, quote=True):
|
||||
"""Convenience function to format and return a key=value pair.
|
||||
|
||||
This will quote the value if needed or if quote is true. If value is a
|
||||
three tuple (charset, language, value), it will be encoded according
|
||||
to RFC2231 rules.
|
||||
"""
|
||||
if value is not None and len(value) > 0:
|
||||
# A tuple is used for RFC 2231 encoded parameter values where items
|
||||
# are (charset, language, value). charset is a string, not a Charset
|
||||
# instance.
|
||||
if isinstance(value, tuple):
|
||||
# Encode as per RFC 2231
|
||||
param += '*'
|
||||
value = utils.encode_rfc2231(value[2], value[0], value[1])
|
||||
# BAW: Please check this. I think that if quote is set it should
|
||||
# force quoting even if not necessary.
|
||||
if quote or tspecials.search(value):
|
||||
return '%s="%s"' % (param, utils.quote(value))
|
||||
else:
|
||||
return '%s=%s' % (param, value)
|
||||
else:
|
||||
return param
|
||||
|
||||
def _parseparam(s):
|
||||
plist = []
|
||||
while s[:1] == ';':
|
||||
s = s[1:]
|
||||
end = s.find(';')
|
||||
while end > 0 and (s.count('"', 0, end) - s.count('\\"', 0, end)) % 2:
|
||||
end = s.find(';', end + 1)
|
||||
if end < 0:
|
||||
end = len(s)
|
||||
f = s[:end]
|
||||
if '=' in f:
|
||||
i = f.index('=')
|
||||
f = f[:i].strip().lower() + '=' + f[i+1:].strip()
|
||||
plist.append(f.strip())
|
||||
s = s[end:]
|
||||
return plist
|
||||
|
||||
|
||||
def _unquotevalue(value):
|
||||
# This is different than utils.collapse_rfc2231_value() because it doesn't
|
||||
# try to convert the value to a unicode. Message.get_param() and
|
||||
# Message.get_params() are both currently defined to return the tuple in
|
||||
# the face of RFC 2231 parameters.
|
||||
if isinstance(value, tuple):
|
||||
return value[0], value[1], utils.unquote(value[2])
|
||||
else:
|
||||
return utils.unquote(value)
|
||||
|
||||
|
||||
|
||||
class Message:
|
||||
"""Basic message object.
|
||||
|
||||
A message object is defined as something that has a bunch of RFC 2822
|
||||
headers and a payload. It may optionally have an envelope header
|
||||
(a.k.a. Unix-From or From_ header). If the message is a container (i.e. a
|
||||
multipart or a message/rfc822), then the payload is a list of Message
|
||||
objects, otherwise it is a string.
|
||||
|
||||
Message objects implement part of the `mapping' interface, which assumes
|
||||
there is exactly one occurrence of the header per message. Some headers
|
||||
do in fact appear multiple times (e.g. Received) and for those headers,
|
||||
you must use the explicit API to set or get all the headers. Not all of
|
||||
the mapping methods are implemented.
|
||||
"""
|
||||
def __init__(self):
|
||||
self._headers = []
|
||||
self._unixfrom = None
|
||||
self._payload = None
|
||||
self._charset = None
|
||||
# Defaults for multipart messages
|
||||
self.preamble = self.epilogue = None
|
||||
self.defects = []
|
||||
# Default content type
|
||||
self._default_type = 'text/plain'
|
||||
|
||||
def __str__(self):
|
||||
"""Return the entire formatted message as a string.
|
||||
This includes the headers, body, and envelope header.
|
||||
"""
|
||||
return self.as_string(unixfrom=True)
|
||||
|
||||
def as_string(self, unixfrom=False):
|
||||
"""Return the entire formatted message as a string.
|
||||
Optional `unixfrom' when True, means include the Unix From_ envelope
|
||||
header.
|
||||
|
||||
This is a convenience method and may not generate the message exactly
|
||||
as you intend because by default it mangles lines that begin with
|
||||
"From ". For more flexibility, use the flatten() method of a
|
||||
Generator instance.
|
||||
"""
|
||||
from email.generator import Generator
|
||||
fp = StringIO()
|
||||
g = Generator(fp)
|
||||
g.flatten(self, unixfrom=unixfrom)
|
||||
return fp.getvalue()
|
||||
|
||||
def is_multipart(self):
|
||||
"""Return True if the message consists of multiple parts."""
|
||||
return isinstance(self._payload, list)
|
||||
|
||||
#
|
||||
# Unix From_ line
|
||||
#
|
||||
def set_unixfrom(self, unixfrom):
|
||||
self._unixfrom = unixfrom
|
||||
|
||||
def get_unixfrom(self):
|
||||
return self._unixfrom
|
||||
|
||||
#
|
||||
# Payload manipulation.
|
||||
#
|
||||
def attach(self, payload):
|
||||
"""Add the given payload to the current payload.
|
||||
|
||||
The current payload will always be a list of objects after this method
|
||||
is called. If you want to set the payload to a scalar object, use
|
||||
set_payload() instead.
|
||||
"""
|
||||
if self._payload is None:
|
||||
self._payload = [payload]
|
||||
else:
|
||||
self._payload.append(payload)
|
||||
|
||||
def get_payload(self, i=None, decode=False):
|
||||
"""Return a reference to the payload.
|
||||
|
||||
The payload will either be a list object or a string. If you mutate
|
||||
the list object, you modify the message's payload in place. Optional
|
||||
i returns that index into the payload.
|
||||
|
||||
Optional decode is a flag indicating whether the payload should be
|
||||
decoded or not, according to the Content-Transfer-Encoding header
|
||||
(default is False).
|
||||
|
||||
When True and the message is not a multipart, the payload will be
|
||||
decoded if this header's value is `quoted-printable' or `base64'. If
|
||||
some other encoding is used, or the header is missing, or if the
|
||||
payload has bogus data (i.e. bogus base64 or uuencoded data), the
|
||||
payload is returned as-is.
|
||||
|
||||
If the message is a multipart and the decode flag is True, then None
|
||||
is returned.
|
||||
"""
|
||||
if i is None:
|
||||
payload = self._payload
|
||||
elif not isinstance(self._payload, list):
|
||||
raise TypeError('Expected list, got %s' % type(self._payload))
|
||||
else:
|
||||
payload = self._payload[i]
|
||||
if decode:
|
||||
if self.is_multipart():
|
||||
return None
|
||||
cte = self.get('content-transfer-encoding', '').lower()
|
||||
if cte == 'quoted-printable':
|
||||
return utils._qdecode(payload)
|
||||
elif cte == 'base64':
|
||||
try:
|
||||
return utils._bdecode(payload)
|
||||
except binascii.Error:
|
||||
# Incorrect padding
|
||||
return payload
|
||||
elif cte in ('x-uuencode', 'uuencode', 'uue', 'x-uue'):
|
||||
sfp = StringIO()
|
||||
try:
|
||||
uu.decode(StringIO(payload+'\n'), sfp, quiet=True)
|
||||
payload = sfp.getvalue()
|
||||
except uu.Error:
|
||||
# Some decoding problem
|
||||
return payload
|
||||
# Everything else, including encodings with 8bit or 7bit are returned
|
||||
# unchanged.
|
||||
return payload
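
Illustrative usage (a sketch, not part of the original module): decode=True undoes the Content-Transfer-Encoding of a non-multipart part.

# Illustrative sketch, not in the original source.
from email.mime.text import MIMEText

part = MIMEText('caf\xc3\xa9', _charset='utf-8')  # body gets base64 encoded
part.get_payload()              # 'Y2Fmw6k=\n'  (still transfer-encoded)
part.get_payload(decode=True)   # 'caf\xc3\xa9' (raw body bytes)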
|
||||
|
||||
def set_payload(self, payload, charset=None):
|
||||
"""Set the payload to the given value.
|
||||
|
||||
Optional charset sets the message's default character set. See
|
||||
set_charset() for details.
|
||||
"""
|
||||
self._payload = payload
|
||||
if charset is not None:
|
||||
self.set_charset(charset)
|
||||
|
||||
def set_charset(self, charset):
|
||||
"""Set the charset of the payload to a given character set.
|
||||
|
||||
charset can be a Charset instance, a string naming a character set, or
|
||||
None. If it is a string it will be converted to a Charset instance.
|
||||
If charset is None, the charset parameter will be removed from the
|
||||
Content-Type field. Anything else will generate a TypeError.
|
||||
|
||||
The message will be assumed to be of type text/* encoded with
|
||||
charset.input_charset. It will be converted to charset.output_charset
|
||||
and encoded properly, if needed, when generating the plain text
|
||||
representation of the message. MIME headers (MIME-Version,
|
||||
Content-Type, Content-Transfer-Encoding) will be added as needed.
|
||||
|
||||
"""
|
||||
if charset is None:
|
||||
self.del_param('charset')
|
||||
self._charset = None
|
||||
return
|
||||
if isinstance(charset, basestring):
|
||||
charset = email.charset.Charset(charset)
|
||||
if not isinstance(charset, email.charset.Charset):
|
||||
raise TypeError(charset)
|
||||
# BAW: should we accept strings that can serve as arguments to the
|
||||
# Charset constructor?
|
||||
self._charset = charset
|
||||
if 'MIME-Version' not in self:
|
||||
self.add_header('MIME-Version', '1.0')
|
||||
if 'Content-Type' not in self:
|
||||
self.add_header('Content-Type', 'text/plain',
|
||||
charset=charset.get_output_charset())
|
||||
else:
|
||||
self.set_param('charset', charset.get_output_charset())
|
||||
if isinstance(self._payload, unicode):
|
||||
self._payload = self._payload.encode(charset.output_charset)
|
||||
if str(charset) != charset.get_output_charset():
|
||||
self._payload = charset.body_encode(self._payload)
|
||||
if 'Content-Transfer-Encoding' not in self:
|
||||
cte = charset.get_body_encoding()
|
||||
try:
|
||||
cte(self)
|
||||
except TypeError:
|
||||
self._payload = charset.body_encode(self._payload)
|
||||
self.add_header('Content-Transfer-Encoding', cte)
|
||||
|
||||
def get_charset(self):
|
||||
"""Return the Charset instance associated with the message's payload.
|
||||
"""
|
||||
return self._charset
|
||||
|
||||
#
|
||||
# MAPPING INTERFACE (partial)
|
||||
#
|
||||
def __len__(self):
|
||||
"""Return the total number of headers, including duplicates."""
|
||||
return len(self._headers)
|
||||
|
||||
def __getitem__(self, name):
|
||||
"""Get a header value.
|
||||
|
||||
Return None if the header is missing instead of raising an exception.
|
||||
|
||||
Note that if the header appeared multiple times, exactly which
|
||||
occurrence gets returned is undefined. Use get_all() to get all
|
||||
the values matching a header field name.
|
||||
"""
|
||||
return self.get(name)
|
||||
|
||||
def __setitem__(self, name, val):
|
||||
"""Set the value of a header.
|
||||
|
||||
Note: this does not overwrite an existing header with the same field
|
||||
name. Use __delitem__() first to delete any existing headers.
|
||||
"""
|
||||
self._headers.append((name, val))
|
||||
|
||||
def __delitem__(self, name):
|
||||
"""Delete all occurrences of a header, if present.
|
||||
|
||||
Does not raise an exception if the header is missing.
|
||||
"""
|
||||
name = name.lower()
|
||||
newheaders = []
|
||||
for k, v in self._headers:
|
||||
if k.lower() != name:
|
||||
newheaders.append((k, v))
|
||||
self._headers = newheaders
|
||||
|
||||
def __contains__(self, name):
|
||||
return name.lower() in [k.lower() for k, v in self._headers]
|
||||
|
||||
def has_key(self, name):
|
||||
"""Return true if the message contains the header."""
|
||||
missing = object()
|
||||
return self.get(name, missing) is not missing
|
||||
|
||||
def keys(self):
|
||||
"""Return a list of all the message's header field names.
|
||||
|
||||
These will be sorted in the order they appeared in the original
|
||||
message, or were added to the message, and may contain duplicates.
|
||||
Any fields deleted and re-inserted are always appended to the header
|
||||
list.
|
||||
"""
|
||||
return [k for k, v in self._headers]
|
||||
|
||||
def values(self):
|
||||
"""Return a list of all the message's header values.
|
||||
|
||||
These will be sorted in the order they appeared in the original
|
||||
message, or were added to the message, and may contain duplicates.
|
||||
Any fields deleted and re-inserted are always appended to the header
|
||||
list.
|
||||
"""
|
||||
return [v for k, v in self._headers]
|
||||
|
||||
def items(self):
|
||||
"""Get all the message's header fields and values.
|
||||
|
||||
These will be sorted in the order they appeared in the original
|
||||
message, or were added to the message, and may contain duplicates.
|
||||
Any fields deleted and re-inserted are always appended to the header
|
||||
list.
|
||||
"""
|
||||
return self._headers[:]
|
||||
|
||||
def get(self, name, failobj=None):
|
||||
"""Get a header value.
|
||||
|
||||
Like __getitem__() but return failobj instead of None when the field
|
||||
is missing.
|
||||
"""
|
||||
name = name.lower()
|
||||
for k, v in self._headers:
|
||||
if k.lower() == name:
|
||||
return v
|
||||
return failobj
|
||||
|
||||
#
|
||||
# Additional useful stuff
|
||||
#
|
||||
|
||||
def get_all(self, name, failobj=None):
|
||||
"""Return a list of all the values for the named field.
|
||||
|
||||
These will be sorted in the order they appeared in the original
|
||||
message, and may contain duplicates. Any fields deleted and
|
||||
re-inserted are always appended to the header list.
|
||||
|
||||
If no such fields exist, failobj is returned (defaults to None).
|
||||
"""
|
||||
values = []
|
||||
name = name.lower()
|
||||
for k, v in self._headers:
|
||||
if k.lower() == name:
|
||||
values.append(v)
|
||||
if not values:
|
||||
return failobj
|
||||
return values
|
||||
|
||||
def add_header(self, _name, _value, **_params):
|
||||
"""Extended header setting.
|
||||
|
||||
name is the header field to add. keyword arguments can be used to set
|
||||
additional parameters for the header field, with underscores converted
|
||||
to dashes. Normally the parameter will be added as key="value" unless
|
||||
value is None, in which case only the key will be added. If a
|
||||
parameter value contains non-ASCII characters it must be specified as a
|
||||
three-tuple of (charset, language, value), in which case it will be
|
||||
encoded according to RFC2231 rules.
|
||||
|
||||
Example:
|
||||
|
||||
msg.add_header('content-disposition', 'attachment', filename='bud.gif')
|
||||
"""
|
||||
parts = []
|
||||
for k, v in _params.items():
|
||||
if v is None:
|
||||
parts.append(k.replace('_', '-'))
|
||||
else:
|
||||
parts.append(_formatparam(k.replace('_', '-'), v))
|
||||
if _value is not None:
|
||||
parts.insert(0, _value)
|
||||
self._headers.append((_name, SEMISPACE.join(parts)))
|
||||
|
||||
def replace_header(self, _name, _value):
|
||||
"""Replace a header.
|
||||
|
||||
Replace the first matching header found in the message, retaining
|
||||
header order and case. If no matching header was found, a KeyError is
|
||||
raised.
|
||||
"""
|
||||
_name = _name.lower()
|
||||
for i, (k, v) in zip(range(len(self._headers)), self._headers):
|
||||
if k.lower() == _name:
|
||||
self._headers[i] = (k, _value)
|
||||
break
|
||||
else:
|
||||
raise KeyError(_name)
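
Illustrative usage (a sketch, not part of the original module): __setitem__ appends rather than replaces, which is why replace_header() and __delitem__() exist.

# Illustrative sketch, not in the original source.
from email.message import Message

msg = Message()
msg['Subject'] = 'first'
msg['Subject'] = 'second'              # appends a second Subject header
msg.get_all('Subject')                 # ['first', 'second']
msg.replace_header('Subject', 'only')  # rewrites the first match in place
del msg['Subject']                     # removes every remaining occurrence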
|
||||
|
||||
#
|
||||
# Use these three methods instead of the three above.
|
||||
#
|
||||
|
||||
def get_content_type(self):
|
||||
"""Return the message's content type.
|
||||
|
||||
The returned string is coerced to lower case of the form
|
||||
`maintype/subtype'. If there was no Content-Type header in the
|
||||
message, the default type as given by get_default_type() will be
|
||||
returned. Since according to RFC 2045, messages always have a default
|
||||
type this will always return a value.
|
||||
|
||||
RFC 2045 defines a message's default type to be text/plain unless it
|
||||
appears inside a multipart/digest container, in which case it would be
|
||||
message/rfc822.
|
||||
"""
|
||||
missing = object()
|
||||
value = self.get('content-type', missing)
|
||||
if value is missing:
|
||||
# This should have no parameters
|
||||
return self.get_default_type()
|
||||
ctype = _splitparam(value)[0].lower()
|
||||
# RFC 2045, section 5.2 says if it's invalid, use text/plain
|
||||
if ctype.count('/') != 1:
|
||||
return 'text/plain'
|
||||
return ctype
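
Illustrative usage (a sketch, not part of the original module): the defaulting and lower-casing rules above in action.

# Illustrative sketch, not in the original source.
from email.message import Message

msg = Message()
msg.get_content_type()            # 'text/plain' -- the default type
msg['Content-Type'] = 'TEXT/HTML; charset=us-ascii'
msg.get_content_type()            # 'text/html' -- coerced to lower case
msg.get_content_maintype()        # 'text'
msg.get_content_subtype()         # 'html'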
|
||||
|
||||
def get_content_maintype(self):
|
||||
"""Return the message's main content type.
|
||||
|
||||
This is the `maintype' part of the string returned by
|
||||
get_content_type().
|
||||
"""
|
||||
ctype = self.get_content_type()
|
||||
return ctype.split('/')[0]
|
||||
|
||||
def get_content_subtype(self):
|
||||
"""Returns the message's sub-content type.
|
||||
|
||||
This is the `subtype' part of the string returned by
|
||||
get_content_type().
|
||||
"""
|
||||
ctype = self.get_content_type()
|
||||
return ctype.split('/')[1]
|
||||
|
||||
def get_default_type(self):
|
||||
"""Return the `default' content type.
|
||||
|
||||
Most messages have a default content type of text/plain, except for
|
||||
messages that are subparts of multipart/digest containers. Such
|
||||
subparts have a default content type of message/rfc822.
|
||||
"""
|
||||
return self._default_type
|
||||
|
||||
def set_default_type(self, ctype):
|
||||
"""Set the `default' content type.
|
||||
|
||||
ctype should be either "text/plain" or "message/rfc822", although this
|
||||
is not enforced. The default content type is not stored in the
|
||||
Content-Type header.
|
||||
"""
|
||||
self._default_type = ctype
|
||||
|
||||
def _get_params_preserve(self, failobj, header):
|
||||
# Like get_params() but preserves the quoting of values. BAW:
|
||||
# should this be part of the public interface?
|
||||
missing = object()
|
||||
value = self.get(header, missing)
|
||||
if value is missing:
|
||||
return failobj
|
||||
params = []
|
||||
for p in _parseparam(';' + value):
|
||||
try:
|
||||
name, val = p.split('=', 1)
|
||||
name = name.strip()
|
||||
val = val.strip()
|
||||
except ValueError:
|
||||
# Must have been a bare attribute
|
||||
name = p.strip()
|
||||
val = ''
|
||||
params.append((name, val))
|
||||
params = utils.decode_params(params)
|
||||
return params
|
||||
|
||||
def get_params(self, failobj=None, header='content-type', unquote=True):
|
||||
"""Return the message's Content-Type parameters, as a list.
|
||||
|
||||
The elements of the returned list are 2-tuples of key/value pairs, as
|
||||
split on the `=' sign. The left hand side of the `=' is the key,
|
||||
while the right hand side is the value. If there is no `=' sign in
|
||||
the parameter the value is the empty string. The value is as
|
||||
described in the get_param() method.
|
||||
|
||||
Optional failobj is the object to return if there is no Content-Type
|
||||
header. Optional header is the header to search instead of
|
||||
Content-Type. If unquote is True, the value is unquoted.
|
||||
"""
|
||||
missing = object()
|
||||
params = self._get_params_preserve(missing, header)
|
||||
if params is missing:
|
||||
return failobj
|
||||
if unquote:
|
||||
return [(k, _unquotevalue(v)) for k, v in params]
|
||||
else:
|
||||
return params
|
||||
|
||||
def get_param(self, param, failobj=None, header='content-type',
|
||||
unquote=True):
|
||||
"""Return the parameter value if found in the Content-Type header.
|
||||
|
||||
Optional failobj is the object to return if there is no Content-Type
|
||||
header, or the Content-Type header has no such parameter. Optional
|
||||
header is the header to search instead of Content-Type.
|
||||
|
||||
Parameter keys are always compared case insensitively. The return
|
||||
value can either be a string, or a 3-tuple if the parameter was RFC
|
||||
2231 encoded. When it's a 3-tuple, the elements of the value are of
|
||||
the form (CHARSET, LANGUAGE, VALUE). Note that both CHARSET and
|
||||
LANGUAGE can be None, in which case you should consider VALUE to be
|
||||
encoded in the us-ascii charset. You can usually ignore LANGUAGE.
|
||||
|
||||
Your application should be prepared to deal with 3-tuple return
|
||||
values, and can convert the parameter to a Unicode string like so:
|
||||
|
||||
param = msg.get_param('foo')
|
||||
if isinstance(param, tuple):
|
||||
param = unicode(param[2], param[0] or 'us-ascii')
|
||||
|
||||
In any case, the parameter value (either the returned string, or the
|
||||
VALUE item in the 3-tuple) is always unquoted, unless unquote is set
|
||||
to False.
|
||||
"""
|
||||
if header not in self:
|
||||
return failobj
|
||||
for k, v in self._get_params_preserve(failobj, header):
|
||||
if k.lower() == param.lower():
|
||||
if unquote:
|
||||
return _unquotevalue(v)
|
||||
else:
|
||||
return v
|
||||
return failobj
|
||||
|
||||
def set_param(self, param, value, header='Content-Type', requote=True,
|
||||
charset=None, language=''):
|
||||
"""Set a parameter in the Content-Type header.
|
||||
|
||||
If the parameter already exists in the header, its value will be
|
||||
replaced with the new value.
|
||||
|
||||
If header is Content-Type and has not yet been defined for this
|
||||
message, it will be set to "text/plain" and the new parameter and
|
||||
value will be appended as per RFC 2045.
|
||||
|
||||
An alternate header can be specified in the header argument, and all
|
||||
parameters will be quoted as necessary unless requote is False.
|
||||
|
||||
If charset is specified, the parameter will be encoded according to RFC
|
||||
2231. Optional language specifies the RFC 2231 language, defaulting
|
||||
to the empty string. Both charset and language should be strings.
|
||||
"""
|
||||
if not isinstance(value, tuple) and charset:
|
||||
value = (charset, language, value)
|
||||
|
||||
if header not in self and header.lower() == 'content-type':
|
||||
ctype = 'text/plain'
|
||||
else:
|
||||
ctype = self.get(header)
|
||||
if not self.get_param(param, header=header):
|
||||
if not ctype:
|
||||
ctype = _formatparam(param, value, requote)
|
||||
else:
|
||||
ctype = SEMISPACE.join(
|
||||
[ctype, _formatparam(param, value, requote)])
|
||||
else:
|
||||
ctype = ''
|
||||
for old_param, old_value in self.get_params(header=header,
|
||||
unquote=requote):
|
||||
append_param = ''
|
||||
if old_param.lower() == param.lower():
|
||||
append_param = _formatparam(param, value, requote)
|
||||
else:
|
||||
append_param = _formatparam(old_param, old_value, requote)
|
||||
if not ctype:
|
||||
ctype = append_param
|
||||
else:
|
||||
ctype = SEMISPACE.join([ctype, append_param])
|
||||
if ctype != self.get(header):
|
||||
del self[header]
|
||||
self[header] = ctype
|
||||
|
||||
def del_param(self, param, header='content-type', requote=True):
|
||||
"""Remove the given parameter completely from the Content-Type header.
|
||||
|
||||
The header will be re-written in place without the parameter or its
|
||||
value. All values will be quoted as necessary unless requote is
|
||||
False. Optional header specifies an alternative to the Content-Type
|
||||
header.
|
||||
"""
|
||||
if header not in self:
|
||||
return
|
||||
new_ctype = ''
|
||||
for p, v in self.get_params(header=header, unquote=requote):
|
||||
if p.lower() != param.lower():
|
||||
if not new_ctype:
|
||||
new_ctype = _formatparam(p, v, requote)
|
||||
else:
|
||||
new_ctype = SEMISPACE.join([new_ctype,
|
||||
_formatparam(p, v, requote)])
|
||||
if new_ctype != self.get(header):
|
||||
del self[header]
|
||||
self[header] = new_ctype
|
||||
|
||||
def set_type(self, type, header='Content-Type', requote=True):
|
||||
"""Set the main type and subtype for the Content-Type header.
|
||||
|
||||
type must be a string in the form "maintype/subtype", otherwise a
|
||||
ValueError is raised.
|
||||
|
||||
This method replaces the Content-Type header, keeping all the
|
||||
parameters in place. If requote is False, this leaves the existing
|
||||
header's quoting as is. Otherwise, the parameters will be quoted (the
|
||||
default).
|
||||
|
||||
An alternative header can be specified in the header argument. When
|
||||
the Content-Type header is set, we'll always also add a MIME-Version
|
||||
header.
|
||||
"""
|
||||
# BAW: should we be strict?
|
||||
if not type.count('/') == 1:
|
||||
raise ValueError
|
||||
# Set the Content-Type, you get a MIME-Version
|
||||
if header.lower() == 'content-type':
|
||||
del self['mime-version']
|
||||
self['MIME-Version'] = '1.0'
|
||||
if header not in self:
|
||||
self[header] = type
|
||||
return
|
||||
params = self.get_params(header=header, unquote=requote)
|
||||
del self[header]
|
||||
self[header] = type
|
||||
# Skip the first param; it's the old type.
|
||||
for p, v in params[1:]:
|
||||
self.set_param(p, v, header, requote)
|
||||
|
||||
def get_filename(self, failobj=None):
|
||||
"""Return the filename associated with the payload if present.
|
||||
|
||||
The filename is extracted from the Content-Disposition header's
|
||||
`filename' parameter, and it is unquoted. If that header is missing
|
||||
the `filename' parameter, this method falls back to looking for the
|
||||
`name' parameter.
|
||||
"""
|
||||
missing = object()
|
||||
filename = self.get_param('filename', missing, 'content-disposition')
|
||||
if filename is missing:
|
||||
filename = self.get_param('name', missing, 'content-type')
|
||||
if filename is missing:
|
||||
return failobj
|
||||
return utils.collapse_rfc2231_value(filename).strip()
|
||||
|
||||
def get_boundary(self, failobj=None):
|
||||
"""Return the boundary associated with the payload if present.
|
||||
|
||||
The boundary is extracted from the Content-Type header's `boundary'
|
||||
parameter, and it is unquoted.
|
||||
"""
|
||||
missing = object()
|
||||
boundary = self.get_param('boundary', missing)
|
||||
if boundary is missing:
|
||||
return failobj
|
||||
# RFC 2046 says that boundaries may begin but not end in w/s
|
||||
return utils.collapse_rfc2231_value(boundary).rstrip()
|
||||
|
||||
def set_boundary(self, boundary):
|
||||
"""Set the boundary parameter in Content-Type to 'boundary'.
|
||||
|
||||
This is subtly different than deleting the Content-Type header and
|
||||
adding a new one with a new boundary parameter via add_header(). The
|
||||
main difference is that using the set_boundary() method preserves the
|
||||
order of the Content-Type header in the original message.
|
||||
|
||||
HeaderParseError is raised if the message has no Content-Type header.
|
||||
"""
|
||||
missing = object()
|
||||
params = self._get_params_preserve(missing, 'content-type')
|
||||
if params is missing:
|
||||
# There was no Content-Type header, and we don't know what type
|
||||
# to set it to, so raise an exception.
|
||||
raise errors.HeaderParseError('No Content-Type header found')
|
||||
newparams = []
|
||||
foundp = False
|
||||
for pk, pv in params:
|
||||
if pk.lower() == 'boundary':
|
||||
newparams.append(('boundary', '"%s"' % boundary))
|
||||
foundp = True
|
||||
else:
|
||||
newparams.append((pk, pv))
|
||||
if not foundp:
|
||||
# The original Content-Type header had no boundary attribute.
|
||||
# Tack one on the end. BAW: should we raise an exception
|
||||
# instead???
|
||||
newparams.append(('boundary', '"%s"' % boundary))
|
||||
# Replace the existing Content-Type header with the new value
|
||||
newheaders = []
|
||||
for h, v in self._headers:
|
||||
if h.lower() == 'content-type':
|
||||
parts = []
|
||||
for k, v in newparams:
|
||||
if v == '':
|
||||
parts.append(k)
|
||||
else:
|
||||
parts.append('%s=%s' % (k, v))
|
||||
newheaders.append((h, SEMISPACE.join(parts)))
|
||||
|
||||
else:
|
||||
newheaders.append((h, v))
|
||||
self._headers = newheaders
|
||||
|
||||
def get_content_charset(self, failobj=None):
|
||||
"""Return the charset parameter of the Content-Type header.
|
||||
|
||||
The returned string is always coerced to lower case. If there is no
|
||||
Content-Type header, or if that header has no charset parameter,
|
||||
failobj is returned.
|
||||
"""
|
||||
missing = object()
|
||||
charset = self.get_param('charset', missing)
|
||||
if charset is missing:
|
||||
return failobj
|
||||
if isinstance(charset, tuple):
|
||||
# RFC 2231 encoded, so decode it, and it better end up as ascii.
|
||||
pcharset = charset[0] or 'us-ascii'
|
||||
try:
|
||||
# LookupError will be raised if the charset isn't known to
|
||||
# Python. UnicodeError will be raised if the encoded text
|
||||
# contains a character not in the charset.
|
||||
charset = unicode(charset[2], pcharset).encode('us-ascii')
|
||||
except (LookupError, UnicodeError):
|
||||
charset = charset[2]
|
||||
# charset character must be in us-ascii range
|
||||
try:
|
||||
if isinstance(charset, str):
|
||||
charset = unicode(charset, 'us-ascii')
|
||||
charset = charset.encode('us-ascii')
|
||||
except UnicodeError:
|
||||
return failobj
|
||||
# RFC 2046, $4.1.2 says charsets are not case sensitive
|
||||
return charset.lower()
|
||||
|
||||
def get_charsets(self, failobj=None):
|
||||
"""Return a list containing the charset(s) used in this message.
|
||||
|
||||
The returned list of items describes the Content-Type headers'
|
||||
charset parameter for this message and all the subparts in its
|
||||
payload.
|
||||
|
||||
Each item will either be a string (the value of the charset parameter
|
||||
in the Content-Type header of that part) or the value of the
|
||||
'failobj' parameter (defaults to None), if the part does not have a
|
||||
main MIME type of "text", or the charset is not defined.
|
||||
|
||||
The list will contain one string for each part of the message, plus
|
||||
one for the container message (i.e. self), so that a non-multipart
|
||||
message will still return a list of length 1.
|
||||
"""
|
||||
return [part.get_content_charset(failobj) for part in self.walk()]
|
||||
|
||||
# I.e. def walk(self): ...
|
||||
from email.iterators import walk
|
|
@@ -0,0 +1,36 @@
|
|||
# Copyright (C) 2001-2006 Python Software Foundation
|
||||
# Author: Keith Dart
|
||||
# Contact: email-sig@python.org
|
||||
|
||||
"""Class representing application/* type MIME documents."""
|
||||
|
||||
__all__ = ["MIMEApplication"]
|
||||
|
||||
from email import encoders
|
||||
from email.mime.nonmultipart import MIMENonMultipart
|
||||
|
||||
|
||||
class MIMEApplication(MIMENonMultipart):
|
||||
"""Class for generating application/* MIME documents."""
|
||||
|
||||
def __init__(self, _data, _subtype='octet-stream',
|
||||
_encoder=encoders.encode_base64, **_params):
|
||||
"""Create an application/* type MIME document.
|
||||
|
||||
_data is a string containing the raw application data.
|
||||
|
||||
_subtype is the MIME content type subtype, defaulting to
|
||||
'octet-stream'.
|
||||
|
||||
_encoder is a function which will perform the actual encoding for
|
||||
transport of the application data, defaulting to base64 encoding.
|
||||
|
||||
Any additional keyword arguments are passed to the base class
|
||||
constructor, which turns them into parameters on the Content-Type
|
||||
header.
|
||||
"""
|
||||
if _subtype is None:
|
||||
raise TypeError('Invalid application MIME subtype')
|
||||
MIMENonMultipart.__init__(self, 'application', _subtype, **_params)
|
||||
self.set_payload(_data)
|
||||
_encoder(self)
|
|
@@ -0,0 +1,73 @@
|
|||
# Copyright (C) 2001-2006 Python Software Foundation
|
||||
# Author: Anthony Baxter
|
||||
# Contact: email-sig@python.org
|
||||
|
||||
"""Class representing audio/* type MIME documents."""
|
||||
|
||||
__all__ = ['MIMEAudio']
|
||||
|
||||
import sndhdr
|
||||
|
||||
from cStringIO import StringIO
|
||||
from email import encoders
|
||||
from email.mime.nonmultipart import MIMENonMultipart
|
||||
|
||||
|
||||
|
||||
_sndhdr_MIMEmap = {'au' : 'basic',
|
||||
'wav' :'x-wav',
|
||||
'aiff':'x-aiff',
|
||||
'aifc':'x-aiff',
|
||||
}
|
||||
|
||||
# There are others in sndhdr that don't have MIME types. :(
|
||||
# Additional ones to be added to sndhdr? midi, mp3, realaudio, wma??
|
||||
def _whatsnd(data):
|
||||
"""Try to identify a sound file type.
|
||||
|
||||
sndhdr.what() has a pretty cruddy interface, unfortunately. This is why
|
||||
we re-do it here. It would be easier to reverse engineer the Unix 'file'
|
||||
command and use the standard 'magic' file, as shipped with a modern Unix.
|
||||
"""
|
||||
hdr = data[:512]
|
||||
fakefile = StringIO(hdr)
|
||||
for testfn in sndhdr.tests:
|
||||
res = testfn(hdr, fakefile)
|
||||
if res is not None:
|
||||
return _sndhdr_MIMEmap.get(res[0])
|
||||
return None
|
||||
|
||||
|
||||
|
||||
class MIMEAudio(MIMENonMultipart):
|
||||
"""Class for generating audio/* MIME documents."""
|
||||
|
||||
def __init__(self, _audiodata, _subtype=None,
|
||||
_encoder=encoders.encode_base64, **_params):
|
||||
"""Create an audio/* type MIME document.
|
||||
|
||||
_audiodata is a string containing the raw audio data. If this data
|
||||
can be decoded by the standard Python `sndhdr' module, then the
|
||||
subtype will be automatically included in the Content-Type header.
|
||||
Otherwise, you can specify the specific audio subtype via the
|
||||
_subtype parameter. If _subtype is not given, and no subtype can be
|
||||
guessed, a TypeError is raised.
|
||||
|
||||
_encoder is a function which will perform the actual encoding for
|
||||
transport of the image data. It takes one argument, which is this
|
||||
Image instance. It should use get_payload() and set_payload() to
|
||||
change the payload to the encoded form. It should also add any
|
||||
Content-Transfer-Encoding or other headers to the message as
|
||||
necessary. The default encoding is Base64.
|
||||
|
||||
Any additional keyword arguments are passed to the base class
|
||||
constructor, which turns them into parameters on the Content-Type
|
||||
header.
|
||||
"""
|
||||
if _subtype is None:
|
||||
_subtype = _whatsnd(_audiodata)
|
||||
if _subtype is None:
|
||||
raise TypeError('Could not find audio MIME subtype')
|
||||
MIMENonMultipart.__init__(self, 'audio', _subtype, **_params)
|
||||
self.set_payload(_audiodata)
|
||||
_encoder(self)
|
|
@@ -0,0 +1,26 @@
|
|||
# Copyright (C) 2001-2006 Python Software Foundation
|
||||
# Author: Barry Warsaw
|
||||
# Contact: email-sig@python.org
|
||||
|
||||
"""Base class for MIME specializations."""
|
||||
|
||||
__all__ = ['MIMEBase']
|
||||
|
||||
from email import message
|
||||
|
||||
|
||||
|
||||
class MIMEBase(message.Message):
|
||||
"""Base class for MIME specializations."""
|
||||
|
||||
def __init__(self, _maintype, _subtype, **_params):
|
||||
"""This constructor adds a Content-Type: and a MIME-Version: header.
|
||||
|
||||
The Content-Type: header is taken from the _maintype and _subtype
|
||||
arguments. Additional parameters for this header are taken from the
|
||||
keyword arguments.
|
||||
"""
|
||||
message.Message.__init__(self)
|
||||
ctype = '%s/%s' % (_maintype, _subtype)
|
||||
self.add_header('Content-Type', ctype, **_params)
|
||||
self['MIME-Version'] = '1.0'
|
|
@@ -0,0 +1,46 @@
|
|||
# Copyright (C) 2001-2006 Python Software Foundation
|
||||
# Author: Barry Warsaw
|
||||
# Contact: email-sig@python.org
|
||||
|
||||
"""Class representing image/* type MIME documents."""
|
||||
|
||||
__all__ = ['MIMEImage']
|
||||
|
||||
import imghdr
|
||||
|
||||
from email import encoders
|
||||
from email.mime.nonmultipart import MIMENonMultipart
|
||||
|
||||
|
||||
|
||||
class MIMEImage(MIMENonMultipart):
|
||||
"""Class for generating image/* type MIME documents."""
|
||||
|
||||
def __init__(self, _imagedata, _subtype=None,
|
||||
_encoder=encoders.encode_base64, **_params):
|
||||
"""Create an image/* type MIME document.
|
||||
|
||||
_imagedata is a string containing the raw image data. If this data
|
||||
can be decoded by the standard Python `imghdr' module, then the
|
||||
subtype will be automatically included in the Content-Type header.
|
||||
Otherwise, you can specify the specific image subtype via the _subtype
|
||||
parameter.
|
||||
|
||||
_encoder is a function which will perform the actual encoding for
|
||||
transport of the image data. It takes one argument, which is this
|
||||
Image instance. It should use get_payload() and set_payload() to
|
||||
change the payload to the encoded form. It should also add any
|
||||
Content-Transfer-Encoding or other headers to the message as
|
||||
necessary. The default encoding is Base64.
|
||||
|
||||
Any additional keyword arguments are passed to the base class
|
||||
constructor, which turns them into parameters on the Content-Type
|
||||
header.
|
||||
"""
|
||||
if _subtype is None:
|
||||
_subtype = imghdr.what(None, _imagedata)
|
||||
if _subtype is None:
|
||||
raise TypeError('Could not guess image MIME subtype')
|
||||
MIMENonMultipart.__init__(self, 'image', _subtype, **_params)
|
||||
self.set_payload(_imagedata)
|
||||
_encoder(self)
|
|
@@ -0,0 +1,34 @@
|
|||
# Copyright (C) 2001-2006 Python Software Foundation
|
||||
# Author: Barry Warsaw
|
||||
# Contact: email-sig@python.org
|
||||
|
||||
"""Class representing message/* MIME documents."""
|
||||
|
||||
__all__ = ['MIMEMessage']
|
||||
|
||||
from email import message
|
||||
from email.mime.nonmultipart import MIMENonMultipart
|
||||
|
||||
|
||||
|
||||
class MIMEMessage(MIMENonMultipart):
|
||||
"""Class representing message/* MIME documents."""
|
||||
|
||||
def __init__(self, _msg, _subtype='rfc822'):
|
||||
"""Create a message/* type MIME document.
|
||||
|
||||
_msg is a message object and must be an instance of Message, or a
|
||||
derived class of Message, otherwise a TypeError is raised.
|
||||
|
||||
Optional _subtype defines the subtype of the contained message. The
|
||||
default is "rfc822" (this is defined by the MIME standard, even though
|
||||
the term "rfc822" is technically outdated by RFC 2822).
|
||||
"""
|
||||
MIMENonMultipart.__init__(self, 'message', _subtype)
|
||||
if not isinstance(_msg, message.Message):
|
||||
raise TypeError('Argument is not an instance of Message')
|
||||
# It's convenient to use this base class method. We need to do it
|
||||
# this way or we'll get an exception
|
||||
message.Message.attach(self, _msg)
|
||||
# And be sure our default type is set correctly
|
||||
self.set_default_type('message/rfc822')
|
|
@@ -0,0 +1,47 @@
|
|||
# Copyright (C) 2002-2006 Python Software Foundation
|
||||
# Author: Barry Warsaw
|
||||
# Contact: email-sig@python.org
|
||||
|
||||
"""Base class for MIME multipart/* type messages."""
|
||||
|
||||
__all__ = ['MIMEMultipart']
|
||||
|
||||
from email.mime.base import MIMEBase
|
||||
|
||||
|
||||
|
||||
class MIMEMultipart(MIMEBase):
|
||||
"""Base class for MIME multipart/* type messages."""
|
||||
|
||||
def __init__(self, _subtype='mixed', boundary=None, _subparts=None,
|
||||
**_params):
|
||||
"""Creates a multipart/* type message.
|
||||
|
||||
By default, creates a multipart/mixed message, with proper
|
||||
Content-Type and MIME-Version headers.
|
||||
|
||||
_subtype is the subtype of the multipart content type, defaulting to
|
||||
`mixed'.
|
||||
|
||||
boundary is the multipart boundary string. By default it is
|
||||
calculated as needed.
|
||||
|
||||
_subparts is a sequence of initial subparts for the payload. It
|
||||
must be an iterable object, such as a list. You can always
|
||||
attach new subparts to the message by using the attach() method.
|
||||
|
||||
Additional parameters for the Content-Type header are taken from the
|
||||
keyword arguments (or passed into the _params argument).
|
||||
"""
|
||||
MIMEBase.__init__(self, 'multipart', _subtype, **_params)
|
||||
|
||||
# Initialise _payload to an empty list as the Message superclass's
|
||||
# implementation of is_multipart assumes that _payload is a list for
|
||||
# multipart messages.
|
||||
self._payload = []
|
||||
|
||||
if _subparts:
|
||||
for p in _subparts:
|
||||
self.attach(p)
|
||||
if boundary:
|
||||
self.set_boundary(boundary)
|
|
@@ -0,0 +1,22 @@
|
|||
# Copyright (C) 2002-2006 Python Software Foundation
|
||||
# Author: Barry Warsaw
|
||||
# Contact: email-sig@python.org
|
||||
|
||||
"""Base class for MIME type messages that are not multipart."""
|
||||
|
||||
__all__ = ['MIMENonMultipart']
|
||||
|
||||
from email import errors
|
||||
from email.mime.base import MIMEBase
|
||||
|
||||
|
||||
|
||||
class MIMENonMultipart(MIMEBase):
|
||||
"""Base class for MIME multipart/* type messages."""
|
||||
|
||||
def attach(self, payload):
|
||||
# The public API prohibits attaching multiple subparts to MIMEBase
|
||||
# derived subtypes since none of them are, by definition, of content
|
||||
# type multipart/*
|
||||
raise errors.MultipartConversionError(
|
||||
'Cannot attach additional subparts to non-multipart/*')
|
|
@@ -0,0 +1,30 @@
|
|||
# Copyright (C) 2001-2006 Python Software Foundation
|
||||
# Author: Barry Warsaw
|
||||
# Contact: email-sig@python.org
|
||||
|
||||
"""Class representing text/* type MIME documents."""
|
||||
|
||||
__all__ = ['MIMEText']
|
||||
|
||||
from email.encoders import encode_7or8bit
|
||||
from email.mime.nonmultipart import MIMENonMultipart
|
||||
|
||||
|
||||
|
||||
class MIMEText(MIMENonMultipart):
|
||||
"""Class for generating text/* type MIME documents."""
|
||||
|
||||
def __init__(self, _text, _subtype='plain', _charset='us-ascii'):
|
||||
"""Create a text/* type MIME document.
|
||||
|
||||
_text is the string for this message object.
|
||||
|
||||
_subtype is the MIME sub content type, defaulting to "plain".
|
||||
|
||||
_charset is the character set parameter added to the Content-Type
|
||||
header. This defaults to "us-ascii". Note that as a side-effect, the
|
||||
Content-Transfer-Encoding header will also be set.
|
||||
"""
|
||||
MIMENonMultipart.__init__(self, 'text', _subtype,
|
||||
**{'charset': _charset})
|
||||
self.set_payload(_text, _charset)
|
|
@@ -0,0 +1,91 @@
|
|||
# Copyright (C) 2001-2006 Python Software Foundation
|
||||
# Author: Barry Warsaw, Thomas Wouters, Anthony Baxter
|
||||
# Contact: email-sig@python.org
|
||||
|
||||
"""A parser of RFC 2822 and MIME email messages."""
|
||||
|
||||
__all__ = ['Parser', 'HeaderParser']
|
||||
|
||||
import warnings
|
||||
from cStringIO import StringIO
|
||||
|
||||
from email.feedparser import FeedParser
|
||||
from email.message import Message
|
||||
|
||||
|
||||
|
||||
class Parser:
|
||||
def __init__(self, *args, **kws):
|
||||
"""Parser of RFC 2822 and MIME email messages.
|
||||
|
||||
Creates an in-memory object tree representing the email message, which
|
||||
can then be manipulated and turned over to a Generator to return the
|
||||
textual representation of the message.
|
||||
|
||||
The string must be formatted as a block of RFC 2822 headers and header
|
||||
continuation lines, optionally preceded by a `Unix-from' header. The
|
||||
header block is terminated either by the end of the string or by a
|
||||
blank line.
|
||||
|
||||
_class is the class to instantiate for new message objects when they
|
||||
must be created. This class must have a constructor that can take
|
||||
zero arguments. Default is Message.Message.
|
||||
"""
|
||||
if len(args) >= 1:
|
||||
if '_class' in kws:
|
||||
raise TypeError("Multiple values for keyword arg '_class'")
|
||||
kws['_class'] = args[0]
|
||||
if len(args) == 2:
|
||||
if 'strict' in kws:
|
||||
raise TypeError("Multiple values for keyword arg 'strict'")
|
||||
kws['strict'] = args[1]
|
||||
if len(args) > 2:
|
||||
raise TypeError('Too many arguments')
|
||||
if '_class' in kws:
|
||||
self._class = kws['_class']
|
||||
del kws['_class']
|
||||
else:
|
||||
self._class = Message
|
||||
if 'strict' in kws:
|
||||
warnings.warn("'strict' argument is deprecated (and ignored)",
|
||||
DeprecationWarning, 2)
|
||||
del kws['strict']
|
||||
if kws:
|
||||
raise TypeError('Unexpected keyword arguments')
|
||||
|
||||
def parse(self, fp, headersonly=False):
|
||||
"""Create a message structure from the data in a file.
|
||||
|
||||
Reads all the data from the file and returns the root of the message
|
||||
structure. Optional headersonly is a flag specifying whether to stop
|
||||
parsing after reading the headers or not. The default is False,
|
||||
meaning it parses the entire contents of the file.
|
||||
"""
|
||||
feedparser = FeedParser(self._class)
|
||||
if headersonly:
|
||||
feedparser._set_headersonly()
|
||||
while True:
|
||||
data = fp.read(8192)
|
||||
if not data:
|
||||
break
|
||||
feedparser.feed(data)
|
||||
return feedparser.close()
|
||||
|
||||
def parsestr(self, text, headersonly=False):
|
||||
"""Create a message structure from a string.
|
||||
|
||||
Returns the root of the message structure. Optional headersonly is a
|
||||
flag specifying whether to stop parsing after reading the headers or
|
||||
not. The default is False, meaning it parses the entire contents of
|
||||
the file.
|
||||
"""
|
||||
return self.parse(StringIO(text), headersonly=headersonly)
|
||||
|
||||
|
||||
|
||||
class HeaderParser(Parser):
|
||||
def parse(self, fp, headersonly=True):
|
||||
return Parser.parse(self, fp, True)
|
||||
|
||||
def parsestr(self, text, headersonly=True):
|
||||
return Parser.parsestr(self, text, True)
|
|
@@ -0,0 +1,336 @@
|
|||
# Copyright (C) 2001-2006 Python Software Foundation
|
||||
# Author: Ben Gertzfield
|
||||
# Contact: email-sig@python.org
|
||||
|
||||
"""Quoted-printable content transfer encoding per RFCs 2045-2047.
|
||||
|
||||
This module handles the content transfer encoding method defined in RFC 2045
|
||||
to encode US ASCII-like 8-bit data called `quoted-printable'. It is used to
|
||||
safely encode text that is in a character set similar to the 7-bit US ASCII
|
||||
character set, but that includes some 8-bit characters that are normally not
|
||||
allowed in email bodies or headers.
|
||||
|
||||
Quoted-printable is very space-inefficient for encoding binary files; use the
|
||||
email.base64mime module for that instead.
|
||||
|
||||
This module provides an interface to encode and decode both headers and bodies
|
||||
with quoted-printable encoding.
|
||||
|
||||
RFC 2045 defines a method for including character set information in an
|
||||
`encoded-word' in a header. This method is commonly used for 8-bit real names
|
||||
in To:/From:/Cc: etc. fields, as well as Subject: lines.
|
||||
|
||||
This module does not do the line wrapping or end-of-line character
|
||||
conversion necessary for proper internationalized headers; it only
|
||||
does dumb encoding and decoding. To deal with the various line
|
||||
wrapping issues, use the email.header module.
|
||||
"""
|
||||
|
||||
__all__ = [
|
||||
'body_decode',
|
||||
'body_encode',
|
||||
'body_quopri_check',
|
||||
'body_quopri_len',
|
||||
'decode',
|
||||
'decodestring',
|
||||
'encode',
|
||||
'encodestring',
|
||||
'header_decode',
|
||||
'header_encode',
|
||||
'header_quopri_check',
|
||||
'header_quopri_len',
|
||||
'quote',
|
||||
'unquote',
|
||||
]
|
||||
|
||||
import re
|
||||
|
||||
from string import hexdigits
|
||||
from email.utils import fix_eols
|
||||
|
||||
CRLF = '\r\n'
|
||||
NL = '\n'
|
||||
|
||||
# See also Charset.py
|
||||
MISC_LEN = 7
|
||||
|
||||
hqre = re.compile(r'[^-a-zA-Z0-9!*+/ ]')
|
||||
bqre = re.compile(r'[^ !-<>-~\t]')
|
||||
|
||||
|
||||
|
||||
# Helpers
|
||||
def header_quopri_check(c):
|
||||
"""Return True if the character should be escaped with header quopri."""
|
||||
return bool(hqre.match(c))
|
||||
|
||||
|
||||
def body_quopri_check(c):
|
||||
"""Return True if the character should be escaped with body quopri."""
|
||||
return bool(bqre.match(c))
|
||||
|
||||
|
||||
def header_quopri_len(s):
|
||||
"""Return the length of str when it is encoded with header quopri."""
|
||||
count = 0
|
||||
for c in s:
|
||||
if hqre.match(c):
|
||||
count += 3
|
||||
else:
|
||||
count += 1
|
||||
return count
|
||||
|
||||
|
||||
def body_quopri_len(str):
|
||||
"""Return the length of str when it is encoded with body quopri."""
|
||||
count = 0
|
||||
for c in str:
|
||||
if bqre.match(c):
|
||||
count += 3
|
||||
else:
|
||||
count += 1
|
||||
return count
|
||||
|
||||
|
||||
def _max_append(L, s, maxlen, extra=''):
|
||||
if not L:
|
||||
L.append(s.lstrip())
|
||||
elif len(L[-1]) + len(s) <= maxlen:
|
||||
L[-1] += extra + s
|
||||
else:
|
||||
L.append(s.lstrip())
|
||||
|
||||
|
||||
def unquote(s):
|
||||
"""Turn a string in the form =AB to the ASCII character with value 0xab"""
|
||||
return chr(int(s[1:3], 16))
|
||||
|
||||
|
||||
def quote(c):
|
||||
return "=%02X" % ord(c)
|
||||
|
||||
|
||||
|
||||
def header_encode(header, charset="iso-8859-1", keep_eols=False,
|
||||
maxlinelen=76, eol=NL):
|
||||
"""Encode a single header line with quoted-printable (like) encoding.
|
||||
|
||||
Defined in RFC 2045, this `Q' encoding is similar to quoted-printable, but
|
||||
used specifically for email header fields to allow charsets with mostly 7
|
||||
bit characters (and some 8 bit) to remain more or less readable in non-RFC
|
||||
2045 aware mail clients.
|
||||
|
||||
charset names the character set to use to encode the header. It defaults
|
||||
to iso-8859-1.
|
||||
|
||||
The resulting string will be in the form:
|
||||
|
||||
"=?charset?q?I_f=E2rt_in_your_g=E8n=E8ral_dire=E7tion?\\n
|
||||
=?charset?q?Silly_=C8nglish_Kn=EEghts?="
|
||||
|
||||
with each line wrapped safely at, at most, maxlinelen characters (defaults
|
||||
to 76 characters). If maxlinelen is None, the entire string is encoded in
|
||||
one chunk with no splitting.
|
||||
|
||||
End-of-line characters (\\r, \\n, \\r\\n) will be automatically converted
|
||||
to the canonical email line separator \\r\\n unless the keep_eols
|
||||
parameter is True (the default is False).
|
||||
|
||||
Each line of the header will be terminated in the value of eol, which
|
||||
defaults to "\\n". Set this to "\\r\\n" if you are using the result of
|
||||
this function directly in email.
|
||||
"""
|
||||
# Return empty headers unchanged
|
||||
if not header:
|
||||
return header
|
||||
|
||||
if not keep_eols:
|
||||
header = fix_eols(header)
|
||||
|
||||
# Quopri encode each line, in encoded chunks no greater than maxlinelen in
|
||||
# length, after the RFC chrome is added in.
|
||||
quoted = []
|
||||
if maxlinelen is None:
|
||||
# An obnoxiously large number that's good enough
|
||||
max_encoded = 100000
|
||||
else:
|
||||
max_encoded = maxlinelen - len(charset) - MISC_LEN - 1
|
||||
|
||||
for c in header:
|
||||
# Space may be represented as _ instead of =20 for readability
|
||||
if c == ' ':
|
||||
_max_append(quoted, '_', max_encoded)
|
||||
# These characters can be included verbatim
|
||||
elif not hqre.match(c):
|
||||
_max_append(quoted, c, max_encoded)
|
||||
# Otherwise, replace with hex value like =E2
|
||||
else:
|
||||
_max_append(quoted, "=%02X" % ord(c), max_encoded)
|
||||
|
||||
# Now add the RFC chrome to each encoded chunk and glue the chunks
|
||||
# together. BAW: should we be able to specify the leading whitespace in
|
||||
# the joiner?
|
||||
joiner = eol + ' '
|
||||
return joiner.join(['=?%s?q?%s?=' % (charset, line) for line in quoted])
|
||||
|
||||
|
||||
|
||||
def encode(body, binary=False, maxlinelen=76, eol=NL):
|
||||
"""Encode with quoted-printable, wrapping at maxlinelen characters.
|
||||
|
||||
If binary is False (the default), end-of-line characters will be converted
|
||||
to the canonical email end-of-line sequence \\r\\n. Otherwise they will
|
||||
be left verbatim.
|
||||
|
||||
Each line of encoded text will end with eol, which defaults to "\\n". Set
|
||||
this to "\\r\\n" if you will be using the result of this function directly
|
||||
in an email.
|
||||
|
||||
Each line will be wrapped at, at most, maxlinelen characters (defaults to
|
||||
76 characters). Long lines will have the `soft linefeed' quoted-printable
|
||||
character "=" appended to them, so the decoded text will be identical to
|
||||
the original text.
|
||||
"""
|
||||
if not body:
|
||||
return body
|
||||
|
||||
if not binary:
|
||||
body = fix_eols(body)
|
||||
|
||||
# BAW: We're accumulating the body text by string concatenation. That
|
||||
# can't be very efficient, but I don't have time now to rewrite it. It
|
||||
# just feels like this algorithm could be more efficient.
|
||||
encoded_body = ''
|
||||
lineno = -1
|
||||
# Preserve line endings here so we can check later to see if an eol needs to
|
||||
# be added to the output later.
|
||||
lines = body.splitlines(1)
|
||||
for line in lines:
|
||||
# But strip off line-endings for processing this line.
|
||||
if line.endswith(CRLF):
|
||||
line = line[:-2]
|
||||
elif line[-1] in CRLF:
|
||||
line = line[:-1]
|
||||
|
||||
lineno += 1
|
||||
encoded_line = ''
|
||||
prev = None
|
||||
linelen = len(line)
|
||||
# Now we need to examine every character to see if it needs to be
|
||||
# quopri encoded. BAW: again, string concatenation is inefficient.
|
||||
for j in range(linelen):
|
||||
c = line[j]
|
||||
prev = c
|
||||
if bqre.match(c):
|
||||
c = quote(c)
|
||||
elif j+1 == linelen:
|
||||
# Check for whitespace at end of line; special case
|
||||
if c not in ' \t':
|
||||
encoded_line += c
|
||||
prev = c
|
||||
continue
|
||||
# Check to see if the line has reached its maximum length
|
||||
if len(encoded_line) + len(c) >= maxlinelen:
|
||||
encoded_body += encoded_line + '=' + eol
|
||||
encoded_line = ''
|
||||
encoded_line += c
|
||||
# Now at end of line..
|
||||
if prev and prev in ' \t':
|
||||
# Special case for whitespace at end of file
|
||||
if lineno + 1 == len(lines):
|
||||
prev = quote(prev)
|
||||
if len(encoded_line) + len(prev) > maxlinelen:
|
||||
encoded_body += encoded_line + '=' + eol + prev
|
||||
else:
|
||||
encoded_body += encoded_line + prev
|
||||
# Just normal whitespace at end of line
|
||||
else:
|
||||
encoded_body += encoded_line + prev + '=' + eol
|
||||
encoded_line = ''
|
||||
# Now look at the line we just finished and if it has a line ending, we
|
||||
# need to add eol to the end of the line.
|
||||
if lines[lineno].endswith(CRLF) or lines[lineno][-1] in CRLF:
|
||||
encoded_body += encoded_line + eol
|
||||
else:
|
||||
encoded_body += encoded_line
|
||||
encoded_line = ''
|
||||
return encoded_body
|
||||
|
||||
|
||||
# For convenience and backwards compatibility w/ standard base64 module
|
||||
body_encode = encode
|
||||
encodestring = encode
|
||||
|
||||
|
||||
|
||||
# BAW: I'm not sure if the intent was for the signature of this function to be
|
||||
# the same as base64MIME.decode() or not...
|
||||
def decode(encoded, eol=NL):
|
||||
"""Decode a quoted-printable string.
|
||||
|
||||
Lines are separated with eol, which defaults to \\n.
|
||||
"""
|
||||
if not encoded:
|
||||
return encoded
|
||||
# BAW: see comment in encode() above. Again, we're building up the
|
||||
# decoded string with string concatenation, which could be done much more
|
||||
# efficiently.
|
||||
decoded = ''
|
||||
|
||||
for line in encoded.splitlines():
|
||||
line = line.rstrip()
|
||||
if not line:
|
||||
decoded += eol
|
||||
continue
|
||||
|
||||
i = 0
|
||||
n = len(line)
|
||||
while i < n:
|
||||
c = line[i]
|
||||
if c != '=':
|
||||
decoded += c
|
||||
i += 1
|
||||
# Otherwise, c == "=". Are we at the end of the line? If so, add
|
||||
# a soft line break.
|
||||
elif i+1 == n:
|
||||
i += 1
|
||||
continue
|
||||
# Decode if in form =AB
|
||||
elif i+2 < n and line[i+1] in hexdigits and line[i+2] in hexdigits:
|
||||
decoded += unquote(line[i:i+3])
|
||||
i += 3
|
||||
# Otherwise, not in form =AB, pass literally
|
||||
else:
|
||||
decoded += c
|
||||
i += 1
|
||||
|
||||
if i == n:
|
||||
decoded += eol
|
||||
# Special case if original string did not end with eol
|
||||
if not encoded.endswith(eol) and decoded.endswith(eol):
|
||||
decoded = decoded[:-1]
|
||||
return decoded
|
||||
|
||||
|
||||
# For convenience and backwards compatibility w/ standard base64 module
|
||||
body_decode = decode
|
||||
decodestring = decode
|
||||
|
||||
|
||||
|
||||
def _unquote_match(match):
|
||||
"""Turn a match in the form =AB to the ASCII character with value 0xab"""
|
||||
s = match.group(0)
|
||||
return unquote(s)
|
||||
|
||||
|
||||
# Header decoding is done a bit differently
|
||||
def header_decode(s):
|
||||
"""Decode a string encoded with RFC 2045 MIME header `Q' encoding.
|
||||
|
||||
This function does not parse a full MIME header value encoded with
|
||||
quoted-printable (like =?iso-8895-1?q?Hello_World?=) -- please use
|
||||
the high level email.header class for that functionality.
|
||||
"""
|
||||
s = s.replace('_', ' ')
|
||||
return re.sub(r'=[a-fA-F0-9]{2}', _unquote_match, s)
|
|
@@ -0,0 +1,19 @@
Return-Path: <bbb@zzz.org>
Delivered-To: bbb@zzz.org
Received: by mail.zzz.org (Postfix, from userid 889)
	id 27CEAD38CC; Fri, 4 May 2001 14:05:44 -0400 (EDT)
MIME-Version: 1.0
Content-Type: text/plain; charset=us-ascii
Content-Transfer-Encoding: 7bit
Message-ID: <15090.61304.110929.45684@aaa.zzz.org>
From: bbb@ddd.com (John X. Doe)
To: bbb@zzz.org
Subject: This is a test message
Date: Fri, 4 May 2001 14:05:44 -0400


Hi,

Do you like this message?

-Me
@@ -0,0 +1,135 @@
|
|||
MIME-version: 1.0
|
||||
From: ppp-request@zzz.org
|
||||
Sender: ppp-admin@zzz.org
|
||||
To: ppp@zzz.org
|
||||
Subject: Ppp digest, Vol 1 #2 - 5 msgs
|
||||
Date: Fri, 20 Apr 2001 20:18:00 -0400 (EDT)
|
||||
X-Mailer: Mailman v2.0.4
|
||||
X-Mailman-Version: 2.0.4
|
||||
Content-Type: multipart/mixed; boundary="192.168.1.2.889.32614.987812255.500.21814"
|
||||
|
||||
--192.168.1.2.889.32614.987812255.500.21814
|
||||
Content-type: text/plain; charset=us-ascii
|
||||
Content-description: Masthead (Ppp digest, Vol 1 #2)
|
||||
|
||||
Send Ppp mailing list submissions to
|
||||
ppp@zzz.org
|
||||
|
||||
To subscribe or unsubscribe via the World Wide Web, visit
|
||||
http://www.zzz.org/mailman/listinfo/ppp
|
||||
or, via email, send a message with subject or body 'help' to
|
||||
ppp-request@zzz.org
|
||||
|
||||
You can reach the person managing the list at
|
||||
ppp-admin@zzz.org
|
||||
|
||||
When replying, please edit your Subject line so it is more specific
|
||||
than "Re: Contents of Ppp digest..."
|
||||
|
||||
|
||||
--192.168.1.2.889.32614.987812255.500.21814
|
||||
Content-type: text/plain; charset=us-ascii
|
||||
Content-description: Today's Topics (5 msgs)
|
||||
|
||||
Today's Topics:
|
||||
|
||||
1. testing #1 (Barry A. Warsaw)
|
||||
2. testing #2 (Barry A. Warsaw)
|
||||
3. testing #3 (Barry A. Warsaw)
|
||||
4. testing #4 (Barry A. Warsaw)
|
||||
5. testing #5 (Barry A. Warsaw)
|
||||
|
||||
--192.168.1.2.889.32614.987812255.500.21814
|
||||
Content-Type: multipart/digest; boundary="__--__--"
|
||||
|
||||
--__--__--
|
||||
|
||||
Message: 1
|
||||
Content-Type: text/plain; charset=us-ascii
|
||||
Content-Transfer-Encoding: 7bit
|
||||
Date: Fri, 20 Apr 2001 20:16:13 -0400
|
||||
To: ppp@zzz.org
|
||||
From: barry@digicool.com (Barry A. Warsaw)
|
||||
Subject: [Ppp] testing #1
|
||||
Precedence: bulk
|
||||
|
||||
|
||||
hello
|
||||
|
||||
|
||||
--__--__--
|
||||
|
||||
Message: 2
|
||||
Date: Fri, 20 Apr 2001 20:16:21 -0400
|
||||
Content-Type: text/plain; charset=us-ascii
|
||||
Content-Transfer-Encoding: 7bit
|
||||
To: ppp@zzz.org
|
||||
From: barry@digicool.com (Barry A. Warsaw)
|
||||
Precedence: bulk
|
||||
|
||||
|
||||
hello
|
||||
|
||||
|
||||
--__--__--
|
||||
|
||||
Message: 3
|
||||
Date: Fri, 20 Apr 2001 20:16:25 -0400
|
||||
Content-Type: text/plain; charset=us-ascii
|
||||
Content-Transfer-Encoding: 7bit
|
||||
To: ppp@zzz.org
|
||||
From: barry@digicool.com (Barry A. Warsaw)
|
||||
Subject: [Ppp] testing #3
|
||||
Precedence: bulk
|
||||
|
||||
|
||||
hello
|
||||
|
||||
|
||||
--__--__--
|
||||
|
||||
Message: 4
|
||||
Date: Fri, 20 Apr 2001 20:16:28 -0400
|
||||
Content-Type: text/plain; charset=us-ascii
|
||||
Content-Transfer-Encoding: 7bit
|
||||
To: ppp@zzz.org
|
||||
From: barry@digicool.com (Barry A. Warsaw)
|
||||
Subject: [Ppp] testing #4
|
||||
Precedence: bulk
|
||||
|
||||
|
||||
hello
|
||||
|
||||
|
||||
--__--__--
|
||||
|
||||
Message: 5
|
||||
Date: Fri, 20 Apr 2001 20:16:32 -0400
|
||||
Content-Type: text/plain; charset=us-ascii
|
||||
Content-Transfer-Encoding: 7bit
|
||||
To: ppp@zzz.org
|
||||
From: barry@digicool.com (Barry A. Warsaw)
|
||||
Subject: [Ppp] testing #5
|
||||
Precedence: bulk
|
||||
|
||||
|
||||
hello
|
||||
|
||||
|
||||
|
||||
|
||||
--__--__----
|
||||
--192.168.1.2.889.32614.987812255.500.21814
|
||||
Content-type: text/plain; charset=us-ascii
|
||||
Content-description: Digest Footer
|
||||
|
||||
_______________________________________________
|
||||
Ppp mailing list
|
||||
Ppp@zzz.org
|
||||
http://www.zzz.org/mailman/listinfo/ppp
|
||||
|
||||
|
||||
--192.168.1.2.889.32614.987812255.500.21814--
|
||||
|
||||
End of Ppp Digest
|
||||
|
|
@@ -0,0 +1,16 @@
|
|||
Return-Path: <bbb@zzz.org>
|
||||
Delivered-To: bbb@zzz.org
|
||||
Received: by mail.zzz.org (Postfix, from userid 889)
|
||||
id 27CEAD38CC; Fri, 4 May 2001 14:05:44 -0400 (EDT)
|
||||
Message-ID: <15090.61304.110929.45684@aaa.zzz.org>
|
||||
From: bbb@ddd.com (John X. Doe)
|
||||
To: bbb@zzz.org
|
||||
Subject: This is a test message
|
||||
Date: Fri, 4 May 2001 14:05:44 -0400
|
||||
|
||||
|
||||
Hi,
|
||||
|
||||
Do you like this message?
|
||||
|
||||
-Me
|
|
@@ -0,0 +1,37 @@
|
|||
Return-Path: <barry@python.org>
|
||||
Delivered-To: barry@python.org
|
||||
Received: by mail.python.org (Postfix, from userid 889)
|
||||
id C2BF0D37C6; Tue, 11 Sep 2001 00:05:05 -0400 (EDT)
|
||||
MIME-Version: 1.0
|
||||
Content-Type: multipart/mixed; boundary="h90VIIIKmx"
|
||||
Content-Transfer-Encoding: 7bit
|
||||
Message-ID: <15261.36209.358846.118674@anthem.python.org>
|
||||
From: barry@python.org (Barry A. Warsaw)
|
||||
To: barry@python.org
|
||||
Subject: a simple multipart
|
||||
Date: Tue, 11 Sep 2001 00:05:05 -0400
|
||||
X-Mailer: VM 6.95 under 21.4 (patch 4) "Artificial Intelligence" XEmacs Lucid
|
||||
X-Attribution: BAW
|
||||
X-Oblique-Strategy: Make a door into a window
|
||||
|
||||
|
||||
--h90VIIIKmx
|
||||
Content-Type: text/plain
|
||||
Content-Disposition: inline;
|
||||
filename="msg.txt"
|
||||
Content-Transfer-Encoding: 7bit
|
||||
|
||||
a simple kind of mirror
|
||||
to reflect upon our own
|
||||
|
||||
--h90VIIIKmx
|
||||
Content-Type: text/plain
|
||||
Content-Disposition: inline;
|
||||
filename="msg.txt"
|
||||
Content-Transfer-Encoding: 7bit
|
||||
|
||||
a simple kind of mirror
|
||||
to reflect upon our own
|
||||
|
||||
--h90VIIIKmx--
|
||||
|
|
@@ -0,0 +1,28 @@
|
|||
From: foo
|
||||
Subject: bar
|
||||
To: baz
|
||||
MIME-Version: 1.0
|
||||
Content-Type: multipart/report; report-type=delivery-status;
|
||||
boundary="D1690A7AC1.996856090/mail.example.com"
|
||||
Message-Id: <20010803162810.0CA8AA7ACC@mail.example.com>
|
||||
|
||||
This is a MIME-encapsulated message.
|
||||
|
||||
--D1690A7AC1.996856090/mail.example.com
|
||||
Content-Type: text/plain
|
||||
|
||||
Yadda yadda yadda
|
||||
|
||||
--D1690A7AC1.996856090/mail.example.com
|
||||
|
||||
Yadda yadda yadda
|
||||
|
||||
--D1690A7AC1.996856090/mail.example.com
|
||||
Content-Type: message/rfc822
|
||||
|
||||
From: nobody@python.org
|
||||
|
||||
Yadda yadda yadda
|
||||
|
||||
--D1690A7AC1.996856090/mail.example.com--
|
||||
|
|
@@ -0,0 +1,33 @@
|
|||
Return-Path: <barry@python.org>
|
||||
Delivered-To: barry@python.org
|
||||
MIME-Version: 1.0
|
||||
Content-Type: message/rfc822
|
||||
Content-Description: forwarded message
|
||||
Content-Transfer-Encoding: 7bit
|
||||
Message-ID: <15265.9482.641338.555352@python.org>
|
||||
From: barry@zope.com (Barry A. Warsaw)
|
||||
Sender: barry@python.org
|
||||
To: barry@python.org
|
||||
Subject: forwarded message from Barry A. Warsaw
|
||||
Date: Thu, 13 Sep 2001 17:28:42 -0400
|
||||
X-Mailer: VM 6.95 under 21.4 (patch 4) "Artificial Intelligence" XEmacs Lucid
|
||||
X-Attribution: BAW
|
||||
X-Oblique-Strategy: Be dirty
|
||||
X-Url: http://barry.wooz.org
|
||||
|
||||
MIME-Version: 1.0
|
||||
Content-Type: text/plain; charset=us-ascii
|
||||
Return-Path: <barry@python.org>
|
||||
Delivered-To: barry@python.org
|
||||
Message-ID: <15265.9468.713530.98441@python.org>
|
||||
From: barry@zope.com (Barry A. Warsaw)
|
||||
Sender: barry@python.org
|
||||
To: barry@python.org
|
||||
Subject: testing
|
||||
Date: Thu, 13 Sep 2001 17:28:28 -0400
|
||||
X-Mailer: VM 6.95 under 21.4 (patch 4) "Artificial Intelligence" XEmacs Lucid
|
||||
X-Attribution: BAW
|
||||
X-Oblique-Strategy: Spectrum analysis
|
||||
X-Url: http://barry.wooz.org
|
||||
|
||||
|
|
@@ -0,0 +1,83 @@
|
|||
MIME-Version: 1.0
|
||||
From: Barry <barry@digicool.com>
|
||||
To: Dingus Lovers <cravindogs@cravindogs.com>
|
||||
Subject: Here is your dingus fish
|
||||
Date: Fri, 20 Apr 2001 19:35:02 -0400
|
||||
Content-Type: multipart/mixed; boundary="BOUNDARY"
|
||||
|
||||
--BOUNDARY
|
||||
Content-Type: text/plain; charset="us-ascii"
|
||||
|
||||
Hi there,
|
||||
|
||||
This is the dingus fish.
|
||||
|
||||
--BOUNDARY
|
||||
Content-Type: image/gif; name="dingusfish.gif"
|
||||
Content-Transfer-Encoding: base64
|
||||
content-disposition: attachment; filename="dingusfish.gif"
|
||||
|
||||
R0lGODdhAAEAAfAAAP///wAAACwAAAAAAAEAAQAC/oSPqcvtD6OctNqLs968+w+G4kiW5omm6sq2
|
||||
7gvH8kzX9o3n+s73/g8MCofEovGITGICTKbyCV0FDNOo9SqpQqpOrJfXzTQj2vD3TGtqL+NtGQ2f
|
||||
qTXmxzuOd7WXdcc9DyjU53ewFni4s0fGhdiYaEhGBelICTNoV1j5NUnFcrmUqemjNifJVWpaOqaI
|
||||
oFq3SspZsSraE7sHq3jr1MZqWvi662vxV4tD+pvKW6aLDOCLyur8PDwbanyDeq0N3DctbQYeLDvR
|
||||
RY6t95m6UB0d3mwIrV7e2VGNvjjffukeJp4w7F65KecGFsTHQGAygOrgrWs1jt28Rc88KESYcGLA
|
||||
/obvTkH6p+CinWJiJmIMqXGQwH/y4qk0SYjgQTczT3ajKZGfuI0uJ4kkVI/DT5s3/ejkxI0aT4Y+
|
||||
YTYgWbImUaXk9nlLmnSh1qJiJFl0OpUqRK4oOy7NyRQtHWofhoYVxkwWXKUSn0YsS+fUV6lhqfYb
|
||||
6ayd3Z5qQdG1B7bvQzaJjwUV2lixMUZ7JVsOlfjWVr/3NB/uFvnySBN6Dcb6rGwaRM3wsormw5cC
|
||||
M9NxWy/bWdufudCvy8bOAjXjVVwta/uO21sE5RHBCzNFXtgq9ORtH4eYjVP4Yryo026nvkFmCeyA
|
||||
B29efV6ravCMK5JwWd5897Qrx7ll38o6iHDZ/rXPR//feevhF4l7wjUGX3xq1eeRfM4RSJGBIV1D
|
||||
z1gKPkfWag3mVBVvva1RlX5bAJTPR/2YqNtw/FkIYYEi/pIZiAdpcxpoHtmnYYoZtvhUftzdx5ZX
|
||||
JSKDW405zkGcZzzGZ6KEv4FI224oDmijlEf+xp6MJK5ojY/ASeVUR+wsKRuJ+XFZ5o7ZeEime8t1
|
||||
ouUsU6YjF5ZtUihhkGfCdFQLWQFJ3UXxmElfhQnR+eCdcDbkFZp6vTRmj56ApCihn5QGpaToNZmR
|
||||
n3NVSpZcQpZ2KEONusaiCsKAug0wkQbJSFO+PTSjneGxOuFjPlUk3ovWvdIerjUg9ZGIOtGq/qeX
|
||||
eCYrrCX+1UPsgTKGGRSbzd5q156d/gpfbJxe66eD5iQKrXj7RGgruGxs62qebBHUKS32CKluCiqZ
|
||||
qh+pmehmEb71noAUoe5e9Zm17S7773V10pjrtG4CmuurCV/n6zLK5turWNhqOvFXbjhZrMD0YhKe
|
||||
wR0zOyuvsh6MWrGoIuzvyWu5y1WIFAqmJselypxXh6dKLNOKEB98L88bS2rkNqqlKzCNJp9c0G0j
|
||||
Gzh0iRrCbHSXmPR643QS+4rWhgFmnSbSuXCjS0xAOWkU2UdLqyuUNfHSFdUouy3bm5i5GnDM3tG8
|
||||
doJ4r5tqu3pPbRSVfvs8uJzeNXhp3n4j/tZ42SwH7eaWUUOjc3qFV9453UHTXZfcLH+OeNs5g36x
|
||||
lBnHvTm7EbMbLeuaLncao8vWCXimfo1o+843Ak6y4ChNeGntvAYvfLK4ezmoyNIbNCLTCXO9ZV3A
|
||||
E8/s88RczPzDwI4Ob7XZyl7+9Miban29h+tJZPrE21wgvBphDfrrfPdCTPKJD/y98L1rZwHcV6Jq
|
||||
Zab0metpuNIX/qAFPoz171WUaUb4HAhBSzHuHfjzHb3kha/2Cctis/ORArVHNYfFyYRH2pYIRzic
|
||||
isVOfPWD1b6mRTqpCRBozzof6UZVvFXRxWIr3GGrEviGYgyPMfahheiSaLs/9QeFu7oZ/ndSY8DD
|
||||
ya9x+uPed+7mxN2IzIISBOMLFYWVqC3Pew1T2nFuuCiwZS5/v6II10i4t1OJcUH2U9zxKodHsGGv
|
||||
Oa+zkvNUYUOa/TCCRutF9MzDwdlUMJADTCGSbDQ5OV4PTamDoPEi6Ecc/RF5RWwkcdSXvSOaDWSn
|
||||
I9LlvubFTQpuc6JKXLcKeb+xdbKRBnwREemXyjg6ME65aJiOuBgrktzykfPLJBKR9ClMavJ62/Ff
|
||||
BlNIyod9yX9wcSXexnXFpvkrbXk64xsx5Db7wXKP5fSgsvwIMM/9631VLBfkmtbHRXpqmtei52hG
|
||||
pUwSlo+BASQoeILDOBgREECxBBh5/iYmNsQ9dIv5+OI++QkqdsJPc3uykz5fkM+OraeekcQF7X4n
|
||||
B5S67za5U967PmooGQhUXfF7afXyCD7ONdRe17QogYjVx38uLwtrS6nhTnm15LQUnu9E2uK6CNI/
|
||||
1HOABj0ESwOjut4FEpFQpdNAm4K2LHnDWHNcmKB2ioKBogysVZtMO2nSxUdZ8Yk2kJc7URioLVI0
|
||||
YgmtIwZj4LoeKemgnOnbUdGnzZ4Oa6scqiolBGqS6RgWNLu0RMhcaE6rhhU4hiuqFXPAG8fGwTPW
|
||||
FKeLMtdVmXLSs5YJGF/YeVm7rREMlY3UYE+yCxbaMXX8y15m5zVHq6GOKDMynzII/jdUHdyVqIy0
|
||||
ifX2+r/EgtZcvRzSb72gU9ui87M2VecjKildW/aFqaYhKoryUjfB/g4qtyVuc60xFDGmCxwjW+qu
|
||||
zjuwl2GkOWn66+3QiiEctvd04OVvcCVzjgT7lrkvjVGKKHmmlDUKowSeikb5kK/mJReuWOxONx+s
|
||||
ULsl+Lqb0CVn0SrVyJ6wt4t6yTeSCafhPhAf0OXn6L60UMxiLolFAtmN35S2Ob1lZpQ1r/n0Qb5D
|
||||
oQ1zJiRVDgF8N3Q8TYfbi3DyWCy3lT1nxyBs6FT3S2GOzWRlxwKvlRP0RPJA9SjxEy0UoEnkA+M4
|
||||
cnzLMJrBGWLFEaaUb5lvpqbq/loOaU5+DFuHPxo82/OZuM8FXG3oVNZhtWpMpb/0Xu5m/LfLhHZQ
|
||||
7yuVI0MqZ7NE43imC8jH3IwGZlbPm0xkJYs7+2U48hXTsFSMqgGDvai0kLxyynKNT/waj+q1c1tz
|
||||
GjOpPBgdCSq3UKZxCSsqFIY+O6JbAWGWcV1pwqLyj5sGqCF1xb1F3varUWqrJv6cN3PrUXzijtfZ
|
||||
FshpBL3Xwr4GIPvU2N8EjrJgS1zl21rbXQMXeXc5jjFyrhpCzijSv/RQtyPSzHCFMhlME95fHglt
|
||||
pRsX+dfSQjUeHAlpWzJ5iOo79Ldnaxai6bXTcGO3fp07ri7HLEmXXPlYi8bv/qVxvNcdra6m7Rlb
|
||||
6JBTb5fd66VhFRjGArh2n7R1rDW4P5NOT9K0I183T2scYkeZ3q/VFyLb09U9ajzXBS8Kgkhc4mBS
|
||||
kYY9cy3Vy9lUnuNJH8HGIclUilwnBtjUOH0gteGOZ4c/XNrhXLSYDyxfnD8z1pDy7rYRvDolhnbe
|
||||
UMzxCZUs40s6s7UIvBnLgc0+vKuOkIXeOrDymlp+Zxra4MZLBbVrqD/jTJ597pDmnw5c4+DbyB88
|
||||
9Cg9DodYcSuMZT/114pptqc/EuTjRPvH/z5slzI3tluOEBBLqOXLOX+0I5929tO97wkvl/atCz+y
|
||||
xJrdwteW2FNW/NSmBP+f/maYtVs/bYyBC7Ox3jsYZHL05CIrBa/nS+b3bHfiYm4Ueil1YZZSgAUI
|
||||
fFZ1dxUmeA2oQRQ3RuGXNGLFV9/XbGFGPV6kfzk1TBBCd+izc7q1H+OHMJwmaBX2IQNYVAKHYepV
|
||||
SSGCe6CnbYHHETKGNe43EDvFgZr0gB/nVHPHZ80VV1ojOiI3XDvYIkl4ayo4bxQIgrFXWTvBI0nH
|
||||
VElWMuw2aLUWCRHHf8ymVCHjFlJnOSojfevCYyyyZDH0IcvHhrsnQ5O1OsWzONuVVKIxSxiFZ/tR
|
||||
fKDAf6xFTnw4O9Qig2VCfW2hJQrmMOuHW0W3dLQmCMO2ccdUd/xyfflH/olTiHZVdGwb8nIwRzSE
|
||||
J15jFlOJuBZBZ4CiyHyd2IFylFlB+HgHhYabhWOGwYO1ZH/Og1dtQlFMk352CGRSIFTapnWQEUtN
|
||||
l4zv8S0aaCFDyGCBqDUxZYpxGHX01y/JuH1xhn7TOCnNCI4eKDs5WGX4R425F4vF1o3BJ4vO0otq
|
||||
I3rimI7jJY1jISqnBxknCIvruF83mF5wN4X7qGLIhR8A2Vg0yFERSIXn9Vv3GHy3Vj/WIkKddlYi
|
||||
yIMv2I/VMjTLpW7pt05SWIZR0RPyxpB4SIUM9lBPGBl0GC7oSEEwRYLe4pJpZY2P0zbI1n+Oc44w
|
||||
qY3PUnmF0ixjVpDD/mJ9wpOBGTVgXlaCaZiPcIWK5NiKBIiPdGaQ0TWGvAiG7nMchdZb7Vgf8zNi
|
||||
MuMyzRdy/lePe9iC4TRx7WhhOQI/QiSVNAmAa2lT/piFbuh7ofJoYSZzrSZ1bvmWw3eN2nKUPVky
|
||||
uPN5/VRfohRd0VYZoqhKIlU6TXYhJxmPUIloAwc1bPmHEpaZYZORHNlXUJM07hATwHR8MJYqkwWR
|
||||
WaIezFhxSFlc8/Fq82hEnpeRozg3ULhhr9lAGtVEkCg5ZNRuuVleBPaZadhG0ZgkyPmDOTOKzViM
|
||||
YgOcpukKqQcbjAWS0IleQ2ROjdh6A+md1qWdBRSX7iSYgFRTtRmBpJioieXJiHfJiMGIR9fJOn8I
|
||||
MSfXYhspn4ooSa2mSAj4n+8Bmg03fBJZoPOJgsVZRxu1oOMRPXYYjdqjihFaEoZpXBREanuJoRI6
|
||||
cibFinq4ngUKh/wQd/H5ofYCZ0HJXR62opZFaAT0iFIZo4DIiUojkjeqKiuoZirKo5Y1a7AWckGa
|
||||
BkuYoD5lpDK6eUs6CkDqpETwl1EqpfhJpVeKpVl6EgUAADs=
|
||||
|
||||
--BOUNDARY--
|
|
@@ -0,0 +1,24 @@
|
|||
MIME-Version: 1.0
|
||||
From: Barry Warsaw <barry@zope.com>
|
||||
To: Dingus Lovers <cravindogs@cravindogs.com>
|
||||
Subject: Lyrics
|
||||
Date: Fri, 20 Apr 2001 19:35:02 -0400
|
||||
Content-Type: multipart/mixed; boundary="BOUNDARY"
|
||||
|
||||
--BOUNDARY
|
||||
Content-Type: text/plain; charset="us-ascii"
|
||||
|
||||
|
||||
--BOUNDARY
|
||||
Content-Type: text/html; charset="iso-8859-1"
|
||||
|
||||
|
||||
--BOUNDARY
|
||||
Content-Type: text/plain; charset="iso-8859-2"
|
||||
|
||||
|
||||
--BOUNDARY
|
||||
Content-Type: text/plain; charset="koi8-r"
|
||||
|
||||
|
||||
--BOUNDARY--
|
|
@@ -0,0 +1,24 @@
|
|||
MIME-Version: 1.0
|
||||
From: Barry Warsaw <barry@zope.com>
|
||||
To: Dingus Lovers <cravindogs@cravindogs.com>
|
||||
Subject: Lyrics
|
||||
Date: Fri, 20 Apr 2001 19:35:02 -0400
|
||||
Content-Type: multipart/mixed; boundary="BOUNDARY"
|
||||
|
||||
--BOUNDARY
|
||||
Content-Type: text/plain; charset="us-ascii"
|
||||
|
||||
|
||||
--BOUNDARY
|
||||
Content-Type: text/html; charset="iso-8859-1"
|
||||
|
||||
|
||||
--BOUNDARY
|
||||
Content-Type: text/plain
|
||||
|
||||
|
||||
--BOUNDARY
|
||||
Content-Type: text/plain; charset="koi8-r"
|
||||
|
||||
|
||||
--BOUNDARY--
|
|
@@ -0,0 +1,39 @@
|
|||
MIME-Version: 1.0
|
||||
From: Barry Warsaw <barry@zope.com>
|
||||
To: Dingus Lovers <cravindogs@cravindogs.com>
|
||||
Subject: Lyrics
|
||||
Date: Fri, 20 Apr 2001 19:35:02 -0400
|
||||
Content-Type: multipart/mixed; boundary="BOUNDARY"
|
||||
|
||||
--BOUNDARY
|
||||
Content-Type: text/plain; charset="us-ascii"
|
||||
Content-Transfer-Encoding: 7bit
|
||||
|
||||
This is a 7bit encoded message.
|
||||
|
||||
--BOUNDARY
|
||||
Content-Type: text/html; charset="iso-8859-1"
|
||||
Content-Transfer-Encoding: Quoted-Printable
|
||||
|
||||
=A1This is a Quoted Printable encoded message!
|
||||
|
||||
--BOUNDARY
|
||||
Content-Type: text/plain; charset="iso-8859-1"
|
||||
Content-Transfer-Encoding: Base64
|
||||
|
||||
VGhpcyBpcyBhIEJhc2U2NCBlbmNvZGVkIG1lc3NhZ2Uu
|
||||
|
||||
|
||||
--BOUNDARY
|
||||
Content-Type: text/plain; charset="iso-8859-1"
|
||||
Content-Transfer-Encoding: Base64
|
||||
|
||||
VGhpcyBpcyBhIEJhc2U2NCBlbmNvZGVkIG1lc3NhZ2UuCg==
|
||||
|
||||
|
||||
--BOUNDARY
|
||||
Content-Type: text/plain; charset="iso-8859-1"
|
||||
|
||||
This has no Content-Transfer-Encoding: header.
|
||||
|
||||
--BOUNDARY--
|
|
@@ -0,0 +1,7 @@
Content-Type: message/rfc822
MIME-Version: 1.0
Subject: The enclosing message

Subject: An enclosed message

Here is the body of the message.
@@ -0,0 +1,36 @@
|
|||
MIME-Version: 1.0
|
||||
From: Barry Warsaw <barry@zope.com>
|
||||
To: Dingus Lovers <cravindogs@cravindogs.com>
|
||||
Subject: Lyrics
|
||||
Date: Fri, 20 Apr 2001 19:35:02 -0400
|
||||
Content-Type: multipart/mixed; boundary="BOUNDARY"
|
||||
|
||||
--BOUNDARY
|
||||
Content-Type: text/plain; charset="us-ascii"
|
||||
|
||||
|
||||
--BOUNDARY
|
||||
Content-Type: text/html; charset="iso-8859-1"
|
||||
|
||||
|
||||
--BOUNDARY
|
||||
Content-Type: multipart/mixed; boundary="ANOTHER"
|
||||
|
||||
--ANOTHER
|
||||
Content-Type: text/plain; charset="iso-8859-2"
|
||||
|
||||
|
||||
--ANOTHER
|
||||
Content-Type: text/plain; charset="iso-8859-3"
|
||||
|
||||
--ANOTHER--
|
||||
|
||||
--BOUNDARY
|
||||
Content-Type: text/plain; charset="us-ascii"
|
||||
|
||||
|
||||
--BOUNDARY
|
||||
Content-Type: text/plain; charset="koi8-r"
|
||||
|
||||
|
||||
--BOUNDARY--
|
|
@@ -0,0 +1,38 @@
|
|||
MIME-Version: 1.0
|
||||
From: Barry Warsaw <barry@zope.com>
|
||||
To: Dingus Lovers <cravindogs@cravindogs.com>
|
||||
Subject: Lyrics
|
||||
Date: Fri, 20 Apr 2001 19:35:02 -0400
|
||||
Content-Type: multipart/mixed; boundary="BOUNDARY"
|
||||
|
||||
--BOUNDARY
|
||||
Content-Type: text/plain; charset="us-ascii"
|
||||
|
||||
|
||||
--BOUNDARY
|
||||
Content-Type: text/html; charset="iso-8859-1"
|
||||
|
||||
|
||||
--BOUNDARY
|
||||
Content-Type: multipart/mixed; boundary="ANOTHER"
|
||||
|
||||
--ANOTHER
|
||||
Content-Type: text/plain; charset="iso-8859-2"
|
||||
|
||||
|
||||
--ANOTHER
|
||||
Content-Type: text/plain; charset="iso-8859-3"
|
||||
|
||||
|
||||
--ANOTHER--
|
||||
|
||||
|
||||
--BOUNDARY
|
||||
Content-Type: text/plain; charset="us-ascii"
|
||||
|
||||
|
||||
--BOUNDARY
|
||||
Content-Type: text/plain; charset="koi8-r"
|
||||
|
||||
|
||||
--BOUNDARY--
|
|
@@ -0,0 +1,94 @@
|
|||
MIME-Version: 1.0
|
||||
From: Barry <barry@digicool.com>
|
||||
To: Dingus Lovers <cravindogs@cravindogs.com>
|
||||
Subject: Here is your dingus fish
|
||||
Date: Fri, 20 Apr 2001 19:35:02 -0400
|
||||
Content-Type: multipart/mixed; boundary="OUTER"
|
||||
|
||||
--OUTER
|
||||
Content-Type: text/plain; charset="us-ascii"
|
||||
|
||||
A text/plain part
|
||||
|
||||
--OUTER
|
||||
Content-Type: multipart/mixed; boundary=BOUNDARY
|
||||
|
||||
|
||||
--BOUNDARY
|
||||
Content-Type: text/plain; charset="us-ascii"
|
||||
|
||||
Hi there,
|
||||
|
||||
This is the dingus fish.
|
||||
|
||||
--BOUNDARY
|
||||
Content-Type: image/gif; name="dingusfish.gif"
|
||||
Content-Transfer-Encoding: base64
|
||||
content-disposition: attachment; filename="dingusfish.gif"
|
||||
|
||||
R0lGODdhAAEAAfAAAP///wAAACwAAAAAAAEAAQAC/oSPqcvtD6OctNqLs968+w+G4kiW5omm6sq2
|
||||
7gvH8kzX9o3n+s73/g8MCofEovGITGICTKbyCV0FDNOo9SqpQqpOrJfXzTQj2vD3TGtqL+NtGQ2f
|
||||
qTXmxzuOd7WXdcc9DyjU53ewFni4s0fGhdiYaEhGBelICTNoV1j5NUnFcrmUqemjNifJVWpaOqaI
|
||||
oFq3SspZsSraE7sHq3jr1MZqWvi662vxV4tD+pvKW6aLDOCLyur8PDwbanyDeq0N3DctbQYeLDvR
|
||||
RY6t95m6UB0d3mwIrV7e2VGNvjjffukeJp4w7F65KecGFsTHQGAygOrgrWs1jt28Rc88KESYcGLA
|
||||
/obvTkH6p+CinWJiJmIMqXGQwH/y4qk0SYjgQTczT3ajKZGfuI0uJ4kkVI/DT5s3/ejkxI0aT4Y+
|
||||
YTYgWbImUaXk9nlLmnSh1qJiJFl0OpUqRK4oOy7NyRQtHWofhoYVxkwWXKUSn0YsS+fUV6lhqfYb
|
||||
6ayd3Z5qQdG1B7bvQzaJjwUV2lixMUZ7JVsOlfjWVr/3NB/uFvnySBN6Dcb6rGwaRM3wsormw5cC
|
||||
M9NxWy/bWdufudCvy8bOAjXjVVwta/uO21sE5RHBCzNFXtgq9ORtH4eYjVP4Yryo026nvkFmCeyA
|
||||
B29efV6ravCMK5JwWd5897Qrx7ll38o6iHDZ/rXPR//feevhF4l7wjUGX3xq1eeRfM4RSJGBIV1D
|
||||
z1gKPkfWag3mVBVvva1RlX5bAJTPR/2YqNtw/FkIYYEi/pIZiAdpcxpoHtmnYYoZtvhUftzdx5ZX
|
||||
JSKDW405zkGcZzzGZ6KEv4FI224oDmijlEf+xp6MJK5ojY/ASeVUR+wsKRuJ+XFZ5o7ZeEime8t1
|
||||
ouUsU6YjF5ZtUihhkGfCdFQLWQFJ3UXxmElfhQnR+eCdcDbkFZp6vTRmj56ApCihn5QGpaToNZmR
|
||||
n3NVSpZcQpZ2KEONusaiCsKAug0wkQbJSFO+PTSjneGxOuFjPlUk3ovWvdIerjUg9ZGIOtGq/qeX
|
||||
eCYrrCX+1UPsgTKGGRSbzd5q156d/gpfbJxe66eD5iQKrXj7RGgruGxs62qebBHUKS32CKluCiqZ
|
||||
qh+pmehmEb71noAUoe5e9Zm17S7773V10pjrtG4CmuurCV/n6zLK5turWNhqOvFXbjhZrMD0YhKe
|
||||
wR0zOyuvsh6MWrGoIuzvyWu5y1WIFAqmJselypxXh6dKLNOKEB98L88bS2rkNqqlKzCNJp9c0G0j
|
||||
Gzh0iRrCbHSXmPR643QS+4rWhgFmnSbSuXCjS0xAOWkU2UdLqyuUNfHSFdUouy3bm5i5GnDM3tG8
|
||||
doJ4r5tqu3pPbRSVfvs8uJzeNXhp3n4j/tZ42SwH7eaWUUOjc3qFV9453UHTXZfcLH+OeNs5g36x
|
||||
lBnHvTm7EbMbLeuaLncao8vWCXimfo1o+843Ak6y4ChNeGntvAYvfLK4ezmoyNIbNCLTCXO9ZV3A
|
||||
E8/s88RczPzDwI4Ob7XZyl7+9Miban29h+tJZPrE21wgvBphDfrrfPdCTPKJD/y98L1rZwHcV6Jq
|
||||
Zab0metpuNIX/qAFPoz171WUaUb4HAhBSzHuHfjzHb3kha/2Cctis/ORArVHNYfFyYRH2pYIRzic
|
||||
isVOfPWD1b6mRTqpCRBozzof6UZVvFXRxWIr3GGrEviGYgyPMfahheiSaLs/9QeFu7oZ/ndSY8DD
|
||||
ya9x+uPed+7mxN2IzIISBOMLFYWVqC3Pew1T2nFuuCiwZS5/v6II10i4t1OJcUH2U9zxKodHsGGv
|
||||
Oa+zkvNUYUOa/TCCRutF9MzDwdlUMJADTCGSbDQ5OV4PTamDoPEi6Ecc/RF5RWwkcdSXvSOaDWSn
|
||||
I9LlvubFTQpuc6JKXLcKeb+xdbKRBnwREemXyjg6ME65aJiOuBgrktzykfPLJBKR9ClMavJ62/Ff
|
||||
BlNIyod9yX9wcSXexnXFpvkrbXk64xsx5Db7wXKP5fSgsvwIMM/9631VLBfkmtbHRXpqmtei52hG
|
||||
pUwSlo+BASQoeILDOBgREECxBBh5/iYmNsQ9dIv5+OI++QkqdsJPc3uykz5fkM+OraeekcQF7X4n
|
||||
B5S67za5U967PmooGQhUXfF7afXyCD7ONdRe17QogYjVx38uLwtrS6nhTnm15LQUnu9E2uK6CNI/
|
||||
1HOABj0ESwOjut4FEpFQpdNAm4K2LHnDWHNcmKB2ioKBogysVZtMO2nSxUdZ8Yk2kJc7URioLVI0
|
||||
YgmtIwZj4LoeKemgnOnbUdGnzZ4Oa6scqiolBGqS6RgWNLu0RMhcaE6rhhU4hiuqFXPAG8fGwTPW
|
||||
FKeLMtdVmXLSs5YJGF/YeVm7rREMlY3UYE+yCxbaMXX8y15m5zVHq6GOKDMynzII/jdUHdyVqIy0
|
||||
ifX2+r/EgtZcvRzSb72gU9ui87M2VecjKildW/aFqaYhKoryUjfB/g4qtyVuc60xFDGmCxwjW+qu
|
||||
zjuwl2GkOWn66+3QiiEctvd04OVvcCVzjgT7lrkvjVGKKHmmlDUKowSeikb5kK/mJReuWOxONx+s
|
||||
ULsl+Lqb0CVn0SrVyJ6wt4t6yTeSCafhPhAf0OXn6L60UMxiLolFAtmN35S2Ob1lZpQ1r/n0Qb5D
|
||||
oQ1zJiRVDgF8N3Q8TYfbi3DyWCy3lT1nxyBs6FT3S2GOzWRlxwKvlRP0RPJA9SjxEy0UoEnkA+M4
|
||||
cnzLMJrBGWLFEaaUb5lvpqbq/loOaU5+DFuHPxo82/OZuM8FXG3oVNZhtWpMpb/0Xu5m/LfLhHZQ
|
||||
7yuVI0MqZ7NE43imC8jH3IwGZlbPm0xkJYs7+2U48hXTsFSMqgGDvai0kLxyynKNT/waj+q1c1tz
|
||||
GjOpPBgdCSq3UKZxCSsqFIY+O6JbAWGWcV1pwqLyj5sGqCF1xb1F3varUWqrJv6cN3PrUXzijtfZ
|
||||
FshpBL3Xwr4GIPvU2N8EjrJgS1zl21rbXQMXeXc5jjFyrhpCzijSv/RQtyPSzHCFMhlME95fHglt
|
||||
pRsX+dfSQjUeHAlpWzJ5iOo79Ldnaxai6bXTcGO3fp07ri7HLEmXXPlYi8bv/qVxvNcdra6m7Rlb
|
||||
6JBTb5fd66VhFRjGArh2n7R1rDW4P5NOT9K0I183T2scYkeZ3q/VFyLb09U9ajzXBS8Kgkhc4mBS
|
||||
kYY9cy3Vy9lUnuNJH8HGIclUilwnBtjUOH0gteGOZ4c/XNrhXLSYDyxfnD8z1pDy7rYRvDolhnbe
|
||||
UMzxCZUs40s6s7UIvBnLgc0+vKuOkIXeOrDymlp+Zxra4MZLBbVrqD/jTJ597pDmnw5c4+DbyB88
|
||||
9Cg9DodYcSuMZT/114pptqc/EuTjRPvH/z5slzI3tluOEBBLqOXLOX+0I5929tO97wkvl/atCz+y
|
||||
xJrdwteW2FNW/NSmBP+f/maYtVs/bYyBC7Ox3jsYZHL05CIrBa/nS+b3bHfiYm4Ueil1YZZSgAUI
|
||||
fFZ1dxUmeA2oQRQ3RuGXNGLFV9/XbGFGPV6kfzk1TBBCd+izc7q1H+OHMJwmaBX2IQNYVAKHYepV
|
||||
SSGCe6CnbYHHETKGNe43EDvFgZr0gB/nVHPHZ80VV1ojOiI3XDvYIkl4ayo4bxQIgrFXWTvBI0nH
|
||||
VElWMuw2aLUWCRHHf8ymVCHjFlJnOSojfevCYyyyZDH0IcvHhrsnQ5O1OsWzONuVVKIxSxiFZ/tR
|
||||
fKDAf6xFTnw4O9Qig2VCfW2hJQrmMOuHW0W3dLQmCMO2ccdUd/xyfflH/olTiHZVdGwb8nIwRzSE
|
||||
J15jFlOJuBZBZ4CiyHyd2IFylFlB+HgHhYabhWOGwYO1ZH/Og1dtQlFMk352CGRSIFTapnWQEUtN
|
||||
l4zv8S0aaCFDyGCBqDUxZYpxGHX01y/JuH1xhn7TOCnNCI4eKDs5WGX4R425F4vF1o3BJ4vO0otq
|
||||
I3rimI7jJY1jISqnBxknCIvruF83mF5wN4X7qGLIhR8A2Vg0yFERSIXn9Vv3GHy3Vj/WIkKddlYi
|
||||
yIMv2I/VMjTLpW7pt05SWIZR0RPyxpB4SIUM9lBPGBl0GC7oSEEwRYLe4pJpZY2P0zbI1n+Oc44w
|
||||
qY3PUnmF0ixjVpDD/mJ9wpOBGTVgXlaCaZiPcIWK5NiKBIiPdGaQ0TWGvAiG7nMchdZb7Vgf8zNi
|
||||
MuMyzRdy/lePe9iC4TRx7WhhOQI/QiSVNAmAa2lT/piFbuh7ofJoYSZzrSZ1bvmWw3eN2nKUPVky
|
||||
uPN5/VRfohRd0VYZoqhKIlU6TXYhJxmPUIloAwc1bPmHEpaZYZORHNlXUJM07hATwHR8MJYqkwWR
|
||||
WaIezFhxSFlc8/Fq82hEnpeRozg3ULhhr9lAGtVEkCg5ZNRuuVleBPaZadhG0ZgkyPmDOTOKzViM
|
||||
YgOcpukKqQcbjAWS0IleQ2ROjdh6A+md1qWdBRSX7iSYgFRTtRmBpJioieXJiHfJiMGIR9fJOn8I
|
||||
MSfXYhspn4ooSa2mSAj4n+8Bmg03fBJZoPOJgsVZRxu1oOMRPXYYjdqjihFaEoZpXBREanuJoRI6
|
||||
cibFinq4ngUKh/wQd/H5ofYCZ0HJXR62opZFaAT0iFIZo4DIiUojkjeqKiuoZirKo5Y1a7AWckGa
|
||||
BkuYoD5lpDK6eUs6CkDqpETwl1EqpfhJpVeKpVl6EgUAADs=
|
||||
|
||||
--BOUNDARY--
|
||||
|
||||
--OUTER--
|
|
@@ -0,0 +1,23 @@
|
|||
Return-Path: <bbb@zzz.org>
|
||||
Delivered-To: bbb@zzz.org
|
||||
Received: by mail.zzz.org (Postfix, from userid 889)
|
||||
id 27CEAD38CC; Fri, 4 May 2001 14:05:44 -0400 (EDT)
|
||||
MIME-Version: 1.0
|
||||
Content-Type: text; charset=us-ascii
|
||||
Content-Transfer-Encoding: 7bit
|
||||
Message-ID: <15090.61304.110929.45684@aaa.zzz.org>
|
||||
From: bbb@ddd.com (John X. Doe)
|
||||
To: bbb@zzz.org
|
||||
Subject: This is a test message
|
||||
Date: Fri, 4 May 2001 14:05:44 -0400
|
||||
|
||||
|
||||
Hi,
|
||||
|
||||
I'm sorry but I'm using a drainbread ISP, which although big and
|
||||
wealthy can't seem to generate standard compliant email. :(
|
||||
|
||||
This message has a Content-Type: header with no subtype. I hope you
|
||||
can still read it.
|
||||
|
||||
-Me
|
|
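The wrinkle in this fixture is its bare Content-Type: text header, which names a major type but no subtype. A small sketch of probing that header with the same email package (the filename is invented); as far as I can tell the parser substitutes its text/plain default when the subtype is missing:

    import email

    raw = open("msg_no_subtype.txt").read()  # illustrative filename
    msg = email.message_from_string(raw)

    print(msg.get_content_maintype())  # major type as the parser reports it
    print(msg.get_content_type())      # full type, falling back to the default when unparsable
    print(msg.get_payload()[:40])      # the body text is still available unchanged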
@@ -0,0 +1,52 @@
Return-Path: <xx@xx.dk>
Received: from fepD.post.tele.dk (195.41.46.149) by mail.groupcare.dk (LSMTP for Windows NT v1.1b) with SMTP id <0.0014F8A2@mail.groupcare.dk>; Mon, 30 Apr 2001 12:17:50 +0200
User-Agent: Microsoft-Outlook-Express-Macintosh-Edition/5.02.2106
Subject: XX
From: xx@xx.dk
To: XX
Message-ID: <xxxx>
Mime-version: 1.0
Content-type: multipart/mixed;
    boundary="MS_Mac_OE_3071477847_720252_MIME_Part"

> Denne meddelelse er i MIME-format. Da dit postl¾sningsprogram ikke forstŒr dette format, kan del af eller hele meddelelsen v¾re ul¾selig.

--MS_Mac_OE_3071477847_720252_MIME_Part
Content-type: multipart/alternative;
    boundary="MS_Mac_OE_3071477847_720252_MIME_Part"


--MS_Mac_OE_3071477847_720252_MIME_Part
Content-type: text/plain; charset="ISO-8859-1"
Content-transfer-encoding: quoted-printable

Some removed test.

--MS_Mac_OE_3071477847_720252_MIME_Part
Content-type: text/html; charset="ISO-8859-1"
Content-transfer-encoding: quoted-printable

<HTML>
<HEAD>
<TITLE>Some removed HTML</TITLE>
</HEAD>
<BODY>
Some removed text.
</BODY>
</HTML>


--MS_Mac_OE_3071477847_720252_MIME_Part--


--MS_Mac_OE_3071477847_720252_MIME_Part
Content-type: image/gif; name="xx.gif";
    x-mac-creator="6F676C65";
    x-mac-type="47494666"
Content-disposition: attachment
Content-transfer-encoding: base64

Some removed base64 encoded chars.

--MS_Mac_OE_3071477847_720252_MIME_Part--
@@ -0,0 +1,123 @@
Return-Path: <>
Delivered-To: scr-admin@socal-raves.org
Received: from cougar.noc.ucla.edu (cougar.noc.ucla.edu [169.232.10.18])
    by babylon.socal-raves.org (Postfix) with ESMTP id CCC2C51B84
    for <scr-admin@socal-raves.org>; Sun, 23 Sep 2001 20:13:54 -0700 (PDT)
Received: from sims-ms-daemon by cougar.noc.ucla.edu
    (Sun Internet Mail Server sims.3.5.2000.03.23.18.03.p10)
    id <0GK500B01D0B8Y@cougar.noc.ucla.edu> for scr-admin@socal-raves.org; Sun,
    23 Sep 2001 20:14:35 -0700 (PDT)
Received: from cougar.noc.ucla.edu
    (Sun Internet Mail Server sims.3.5.2000.03.23.18.03.p10)
    id <0GK500B01D0B8X@cougar.noc.ucla.edu>; Sun, 23 Sep 2001 20:14:35 -0700 (PDT)
Date: Sun, 23 Sep 2001 20:14:35 -0700 (PDT)
From: Internet Mail Delivery <postmaster@ucla.edu>
Subject: Delivery Notification: Delivery has failed
To: scr-admin@socal-raves.org
Message-id: <0GK500B04D0B8X@cougar.noc.ucla.edu>
MIME-version: 1.0
Sender: scr-owner@socal-raves.org
Errors-To: scr-owner@socal-raves.org
X-BeenThere: scr@socal-raves.org
X-Mailman-Version: 2.1a3
Precedence: bulk
List-Help: <mailto:scr-request@socal-raves.org?subject=help>
List-Post: <mailto:scr@socal-raves.org>
List-Subscribe: <http://socal-raves.org/mailman/listinfo/scr>,
    <mailto:scr-request@socal-raves.org?subject=subscribe>
List-Id: SoCal-Raves <scr.socal-raves.org>
List-Unsubscribe: <http://socal-raves.org/mailman/listinfo/scr>,
    <mailto:scr-request@socal-raves.org?subject=unsubscribe>
List-Archive: <http://socal-raves.org/mailman/private/scr/>
Content-Type: multipart/report; boundary="Boundary_(ID_PGS2F2a+z+/jL7hupKgRhA)"


--Boundary_(ID_PGS2F2a+z+/jL7hupKgRhA)
Content-type: text/plain; charset=ISO-8859-1

This report relates to a message you sent with the following header fields:

Message-id: <002001c144a6$8752e060$56104586@oxy.edu>
Date: Sun, 23 Sep 2001 20:10:55 -0700
From: "Ian T. Henry" <henryi@oxy.edu>
To: SoCal Raves <scr@socal-raves.org>
Subject: [scr] yeah for Ians!!

Your message cannot be delivered to the following recipients:

Recipient address: jangel1@cougar.noc.ucla.edu
Reason: recipient reached disk quota


--Boundary_(ID_PGS2F2a+z+/jL7hupKgRhA)
Content-type: message/DELIVERY-STATUS

Original-envelope-id: 0GK500B4HD0888@cougar.noc.ucla.edu
Reporting-MTA: dns; cougar.noc.ucla.edu

Action: failed
Status: 5.0.0 (recipient reached disk quota)
Original-recipient: rfc822;jangel1@cougar.noc.ucla.edu
Final-recipient: rfc822;jangel1@cougar.noc.ucla.edu

--Boundary_(ID_PGS2F2a+z+/jL7hupKgRhA)
Content-type: MESSAGE/RFC822

Return-path: scr-admin@socal-raves.org
Received: from sims-ms-daemon by cougar.noc.ucla.edu
    (Sun Internet Mail Server sims.3.5.2000.03.23.18.03.p10)
    id <0GK500B01D0B8X@cougar.noc.ucla.edu>; Sun, 23 Sep 2001 20:14:35 -0700 (PDT)
Received: from panther.noc.ucla.edu by cougar.noc.ucla.edu
    (Sun Internet Mail Server sims.3.5.2000.03.23.18.03.p10)
    with ESMTP id <0GK500B4GD0888@cougar.noc.ucla.edu> for jangel1@sims-ms-daemon;
    Sun, 23 Sep 2001 20:14:33 -0700 (PDT)
Received: from babylon.socal-raves.org
    (ip-209-85-222-117.dreamhost.com [209.85.222.117])
    by panther.noc.ucla.edu (8.9.1a/8.9.1) with ESMTP id UAA09793 for
    <jangel1@ucla.edu>; Sun, 23 Sep 2001 20:14:32 -0700 (PDT)
Received: from babylon (localhost [127.0.0.1]) by babylon.socal-raves.org
    (Postfix) with ESMTP id D3B2951B70; Sun, 23 Sep 2001 20:13:47 -0700 (PDT)
Received: by babylon.socal-raves.org (Postfix, from userid 60001)
    id A611F51B82; Sun, 23 Sep 2001 20:13:46 -0700 (PDT)
Received: from tiger.cc.oxy.edu (tiger.cc.oxy.edu [134.69.3.112])
    by babylon.socal-raves.org (Postfix) with ESMTP id ADA7351B70 for
    <scr@socal-raves.org>; Sun, 23 Sep 2001 20:13:44 -0700 (PDT)
Received: from ent (n16h86.dhcp.oxy.edu [134.69.16.86])
    by tiger.cc.oxy.edu (8.8.8/8.8.8) with SMTP id UAA08100 for
    <scr@socal-raves.org>; Sun, 23 Sep 2001 20:14:24 -0700 (PDT)
Date: Sun, 23 Sep 2001 20:10:55 -0700
From: "Ian T. Henry" <henryi@oxy.edu>
Subject: [scr] yeah for Ians!!
Sender: scr-admin@socal-raves.org
To: SoCal Raves <scr@socal-raves.org>
Errors-to: scr-admin@socal-raves.org
Message-id: <002001c144a6$8752e060$56104586@oxy.edu>
MIME-version: 1.0
X-Mailer: Microsoft Outlook Express 5.50.4522.1200
Content-type: text/plain; charset=us-ascii
Precedence: bulk
Delivered-to: scr-post@babylon.socal-raves.org
Delivered-to: scr@socal-raves.org
X-Converted-To-Plain-Text: from multipart/alternative by demime 0.98e
X-Converted-To-Plain-Text: Alternative section used was text/plain
X-BeenThere: scr@socal-raves.org
X-Mailman-Version: 2.1a3
List-Help: <mailto:scr-request@socal-raves.org?subject=help>
List-Post: <mailto:scr@socal-raves.org>
List-Subscribe: <http://socal-raves.org/mailman/listinfo/scr>,
    <mailto:scr-request@socal-raves.org?subject=subscribe>
List-Id: SoCal-Raves <scr.socal-raves.org>
List-Unsubscribe: <http://socal-raves.org/mailman/listinfo/scr>,
    <mailto:scr-request@socal-raves.org?subject=unsubscribe>
List-Archive: <http://socal-raves.org/mailman/private/scr/>

I always love to find more Ian's that are over 3 years old!!

Ian
_______________________________________________
For event info, list questions, or to unsubscribe, see http://www.socal-raves.org/



--Boundary_(ID_PGS2F2a+z+/jL7hupKgRhA)--
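This fixture is a full multipart/report bounce: a human-readable part, a message/delivery-status part, and the original message wrapped as message/rfc822. A hedged sketch of reading the machine-readable pieces with the stdlib email package (bounce.txt is an invented name, not a file from this commit):

    import email

    bounce = email.message_from_string(open("bounce.txt").read())  # illustrative path

    for part in bounce.walk():
        ctype = part.get_content_type()  # lower-cased, so MESSAGE/RFC822 compares cleanly
        if ctype == "message/delivery-status":
            # the DSN payload parses into header blocks: one per-message block,
            # then one block per recipient
            for block in part.get_payload():
                if block["action"]:
                    print("action=%s status=%s" % (block["action"], block["status"]))
        elif ctype == "message/rfc822":
            original = part.get_payload(0)  # the bounced message rides along intact
            print("original subject: %s" % original["subject"])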
@@ -0,0 +1,12 @@
MIME-Version: 1.0
From: Barry <barry@digicool.com>
To: Dingus Lovers <cravindogs@cravindogs.com>
Subject: Here is your dingus fish
Date: Fri, 20 Apr 2001 19:35:02 -0400
Content-Type: multipart/mixed; boundary="BOUNDARY"

Hi there,

This is the dingus fish.

[Non-text (image/gif) part of message omitted, filename dingusfish.gif]
@@ -0,0 +1,6 @@
Content-Type: text/plain; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
X-Foobar-Spoink-Defrobnit: wasnipoop; giraffes="very-long-necked-animals";
    spooge="yummy"; hippos="gargantuan"; marshmallows="gooey"
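The last header in this fixture is folded across two physical lines and carries a string of parameters. A short sketch (the filename is invented) of letting the email package split those parameters instead of slicing the string by hand:

    import email

    msg = email.message_from_string(open("folded_header.txt").read())  # illustrative path

    # The two physical lines are parsed as a single header value.
    print(repr(msg["X-Foobar-Spoink-Defrobnit"]))

    # get_params() splits any parameterised header, not just Content-Type.
    for name, value in msg.get_params(header="X-Foobar-Spoink-Defrobnit"):
        print("%s=%r" % (name, value))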
@@ -0,0 +1,43 @@
Send Ppp mailing list submissions to
ppp@zzz.org

To subscribe or unsubscribe via the World Wide Web, visit
http://www.zzz.org/mailman/listinfo/ppp
or, via email, send a message with subject or body 'help' to
ppp-request@zzz.org

You can reach the person managing the list at
ppp-admin@zzz.org

When replying, please edit your Subject line so it is more specific
than "Re: Contents of Ppp digest..."

Today's Topics:

1. testing #1 (Barry A. Warsaw)
2. testing #2 (Barry A. Warsaw)
3. testing #3 (Barry A. Warsaw)
4. testing #4 (Barry A. Warsaw)
5. testing #5 (Barry A. Warsaw)

hello


hello


hello


hello


hello



_______________________________________________
Ppp mailing list
Ppp@zzz.org
http://www.zzz.org/mailman/listinfo/ppp
Some files were not shown because too many files have changed in this diff.