diff options
author | Mike Crute <mike@crute.us> | 2019-05-21 13:20:16 +0000 |
---|---|---|
committer | Mike Crute <mike@crute.us> | 2019-05-21 13:20:16 +0000 |
commit | 63fbf8758a9d6aa6127eaebd5581aef4e3cd4f46 (patch) | |
tree | 7718303d6c1f8e9606d9564356e9f16b1df1e4e4 | |
download | website_from_wiki-master.tar.bz2 website_from_wiki-master.tar.xz website_from_wiki-master.zip |
-rw-r--r-- | wiki_upload_test.py | 223 |
1 files changed, 223 insertions, 0 deletions
diff --git a/wiki_upload_test.py b/wiki_upload_test.py new file mode 100644 index 0000000..2c256b4 --- /dev/null +++ b/wiki_upload_test.py | |||
@@ -0,0 +1,223 @@ | |||
def get_wiki(path):
    """Load the script at *path* (``~`` expanded) as a module named ``wiki``.

    Replaces the deprecated ``imp`` machinery (removed in Python 3.12) with
    importlib, closes no dangling file handle, and works for extension-less
    files such as ``~/bin/viwiki``.
    """
    import os.path
    from importlib.machinery import SourceFileLoader
    from importlib.util import module_from_spec, spec_from_loader

    full_path = os.path.expanduser(path)
    # SourceFileLoader (rather than spec_from_file_location) so the file is
    # treated as Python source even without a .py suffix.
    loader = SourceFileLoader("wiki", full_path)
    spec = spec_from_loader("wiki", loader)
    module = module_from_spec(spec)
    loader.exec_module(module)
    return module
5 | |||
6 | |||
# Load the user's viwiki script as a module; everything below builds on its
# WikiAPI class and get_credentials() helper.
wiki = get_wiki('~/bin/viwiki')


import re

# Python 2/3 compatibility: StringIO moved into the io module in Python 3.
try:
    from io import StringIO
except ImportError:
    from StringIO import StringIO
16 | |||
17 | |||
class RawWikiWrapper(wiki.WikiAPI):
    """Thin pass-through layer over wiki.WikiAPI.

    Each method maps one-to-one onto a remote wiki procedure; the instance
    itself is callable (inherited behaviour) and performs the actual request.
    """

    def __init__(self, profile):
        # Resolve the named credential profile and hand the pieces to the API.
        credentials = wiki.get_credentials(profile)
        super(RawWikiWrapper, self).__init__(*credentials)

    def list_pages_by_prefix(self, prefix):
        """Return the names of every page whose title starts with *prefix*."""
        return self("getAllPagesEx", { "prefix": prefix })

    def list_attachments(self, page):
        """Return the attachment file names for *page*."""
        return self("listAttachments", page)

    def get_attachment(self, page, attachment):
        """Fetch the raw contents of one attachment on *page*."""
        return self("getAttachment", page, attachment)

    def get_page(self, page):
        """Fetch the raw markup text of *page*."""
        return self("getPage", page)

    def get_page_info(self, page):
        """Fetch the metadata dict (author, version, lastModified) for *page*."""
        return self("getPageInfo", page)

    def get_processing_instruction(self, page, pi):
        """Fetch the value of processing instruction *pi* on *page*."""
        return self("getProcessingInstruction", page, pi)
40 | |||
41 | |||
class WikiWrapper(RawWikiWrapper):
    """Object-returning facade: wraps raw page names in WikiPage instances.

    ``self._api`` is a bound super() proxy, so WikiPage objects created here
    call the raw string-returning methods rather than these overrides.
    """

    def __init__(self, *args):
        super(WikiWrapper, self).__init__(*args)
        self._api = super(WikiWrapper, self)

    def list_pages_by_prefix(self, prefix):
        """Return a WikiPage for every page name matching *prefix*."""
        names = super(WikiWrapper, self).list_pages_by_prefix(prefix)
        pages = []
        for name in names:
            pages.append(WikiPage(name, self._api))
        return pages

    def get_page(self, page):
        """Wrap the page name *page*; its contents are fetched lazily."""
        return WikiPage(page, self._api)
56 | |||
57 | |||
class WikiPage(object):
    """Lazy wrapper around a single wiki page.

    Contents, metadata and processing instructions (PIs) are fetched through
    *api* on first access and cached on the instance.
    """

    def __init__(self, name, api, contents=None):
        self.name = name
        self._api = api          # provides get_page()/get_page_info()/list_attachments()
        self._contents = contents
        self._meta = None        # cached getPageInfo dict
        self._pis = None         # cached {pi-name: value} dict, set by _parse()

    def __repr__(self):
        return "{}({!r}, {!r})".format(
            self.__class__.__name__, self.name, self._api)

    def attachments(self):
        """Return a WikiAttachment for every file attached to this page."""
        return [
            WikiAttachment(self.name, filename, self)
            for filename in self._api.list_attachments(self.name)
        ]

    @staticmethod
    def _parse_pi(line):
        """Parse one ``#...`` header line.

        Returns ``(name, value)`` -- the raw argument string for ``pragma``,
        a list of words otherwise -- or None for ``##`` comment lines and
        malformed lines with no argument.
        """
        line = line[1:].strip()
        # partition() tolerates lines with no argument, where the original
        # split(" ", 1) raised ValueError on unpacking.
        pi_type, _, args = line.partition(" ")

        if not args:
            return None
        elif pi_type == "pragma":
            return pi_type, args
        elif pi_type == "#":
            return None
        else:
            return pi_type, args.split(" ")

    def _parse(self, contents):
        """Split raw page text into ``(body, pis)``.

        Lines starting with ``#`` before the first body line are consumed as
        processing instructions; everything from the first non-``#`` line
        onward is body text (including later ``#`` lines).
        """
        buffer = StringIO()
        have_body = False
        pis = {}

        for line in contents.split("\n"):
            if not have_body and line.startswith("#"):
                res = self._parse_pi(line)
                if res:
                    pis[res[0]] = res[1]
            else:
                # BUG FIX: the original set self.have_body (an unused
                # attribute), so the local flag never flipped and ``#``
                # lines inside the body were stripped as PIs.
                have_body = True
                buffer.write(u"{}\n".format(line))

        return buffer.getvalue(), pis

    @property
    def contents(self):
        """Page body text, fetched and parsed on first access."""
        if not self._contents:
            self._contents, self._pis = self._parse(
                self._api.get_page(self.name))
        return self._contents

    @property
    def meta(self):
        """Page metadata dict from getPageInfo, fetched on first access."""
        if not self._meta:
            self._meta = self._api.get_page_info(self.name)
        return self._meta

    @property
    def last_modified(self):
        return self.meta["lastModified"]

    @property
    def version(self):
        return self.meta["version"]

    @property
    def author(self):
        # The author comes back namespaced (e.g. "Self:Name"); keep the tail.
        return self.meta["author"].split(":")[-1]

    @property
    def format(self):
        """Markup format from the ``#format`` PI, defaulting to ``"rst"``.

        BUG FIX: the original dereferenced ``self._pis`` directly, which is
        None until _parse() has run (and stays None forever when contents
        were passed to the constructor), raising AttributeError.
        """
        return (self._pis or {}).get("format", ["rst"])[0]
135 | |||
136 | |||
class WikiAttachment(object):
    """One file attached to a wiki page; contents are fetched on demand."""

    def __init__(self, page, filename, api):
        self.page = page
        self.filename = filename
        self._api = api

    def __repr__(self):
        cls_name = self.__class__.__name__
        return "{}({!r}, {!r}, {!r})".format(
            cls_name, self.page, self.filename, self._api)

    def get_contents(self):
        """Download and return the raw attachment data."""
        return self._api.get_attachment(self.page, self.filename)
150 | |||
151 | |||
class PageTree(object):
    """Root of a tree of PageNode objects, anchored at *root_path*."""

    def __init__(self, root_path):
        self.root_path = root_path
156 | |||
157 | |||
class PageNode(object):
    """One page plus its child pages, mirroring the wiki hierarchy."""

    def __init__(self, page=None):
        self.page = page
        self.children = []

    def file_name(self, root_path):
        """Map the page name (minus *root_path*) to a dashed lower-case path.

        CamelCase components become dash separated: Foo/BarBaz -> foo/bar-baz.
        """
        relative = self.page.name[len(root_path):]
        parts = [
            re.sub("(.)([A-Z]+)", r"\1-\2", component).lower()
            for component in relative.split("/")
        ]
        return "/".join(parts)
169 | |||
170 | # Metadata: | ||
171 | # Site Title | ||
172 | # Footer Link (n) | ||
173 | # Date | ||
174 | # Author | ||
175 | # Tag | ||
176 | # Template (filename) | ||
177 | # Renderer (Page, Blog) | ||
178 | |||
179 | # Page tree: | ||
180 | # Keep metadata attributes from parents | ||
181 | # Add robots (/robots.txt) | ||
182 | # Add sitemap (/sitemap.xml) (https://www.sitemaps.org/protocol.html) | ||
183 | |||
184 | # Site: | ||
185 | # Load template | ||
186 | # Render page | ||
187 | # Render text-only page | ||
188 | # Set copyright date | ||
189 | |||
190 | # Home Page: | ||
191 | # Extract bottom site links | ||
192 | # Extract page header | ||
193 | # Extract site title | ||
194 | |||
195 | # All pages: | ||
196 | # Handle attachments | ||
197 | # Embed images | ||
198 | # Rewrite links | ||
199 | # Extract title | ||
200 | # Extract author | ||
201 | # Extract date | ||
202 | # Extract tags | ||
203 | # Extract last updated date for footer | ||
204 | |||
205 | # Blog: | ||
206 | # Full post list page (/blog) | ||
207 | # By date page (/archive) | ||
208 | # Tags page (/tags) | ||
209 | # Atom feed (/feed, /atom) | ||
210 | # Rss feed (/rss) | ||
211 | |||
212 | |||
if __name__ == "__main__":
    # Ad-hoc exploration harness: connect using the "crute" credential
    # profile, fetch one page lazily, and drop into pdb to poke at it.
    api = WikiWrapper("crute")

    #print(api.get_page("MikeCruteWebsite"))
    #print(api.list_pages_by_prefix("MikeCruteWebsite/"))
    #print(api.list_attachments("MikeCruteWebsite/Talks/ClePyAST"))
    #print(api.get_attachment("MikeCruteWebsite/Talks/ClePyAST", "clepy-python-ast.pdf"))

    page = api.get_page("MikeCruteWebsite/Talks/ClepyAst")

    import pdb; pdb.set_trace(); print("")