python搭建微信公眾平臺
更新時間:2016年02月09日 19:35:34 投稿:lijiao
這篇文章主要介紹了python搭建微信公眾平臺的相關資料和技巧,感興趣的朋友可以參考一下
python基于新浪sae開發的微信公眾平臺,實現功能:
輸入段子---回復笑話
輸入開源+文章---發(fā)送消息到開源中國
輸入快遞+訂單號---查詢快遞信息
輸入天氣---查詢南京最近五天天氣狀況
輸入微博熱點---回復微博當前熱門話題
輸入電影+名稱---回復百度云盤中搜索的鏈接
具體實現(xiàn)代碼:
# -*- coding: utf-8 -*-
import hashlib
import web
import lxml
import time
import os
import urllib2,json
import urllib
import re
import random
import hashlib
import cookielib
from urllib import urlencode
from lxml import etree
class WeixinInterface:
    """web.py request handler implementing a WeChat Official Account backend.

    GET handles the one-time endpoint-verification handshake mandated by the
    WeChat platform; POST receives pushed text messages and dispatches on
    keywords: weather, movie search, jokes, OSChina tweets, express-delivery
    tracking, weibo hot topics, zhihu notifications, and a chatbot fallback.
    """

    # Browser-like headers shared by all page-scraping requests
    # (the original repeated this dict verbatim in five branches).
    _HEADERS = {
        'Connection': 'Keep-Alive',
        'Accept': 'text/html, application/xhtml+xml, */*',
        'Accept-Language': 'en-US,en;q=0.8,zh-Hans-CN;q=0.5,zh-Hans;q=0.3',
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; WOW64; Trident/7.0; rv:11.0) like Gecko',
    }
    # User-Agent header pair used by the cookie-enabled openers.
    _UA = ('User-agent',
           'Mozilla/5.0 (X11; Linux x86_64; rv:38.0) Gecko/20100101 Firefox/38.0 Iceweasel/38.3.0')

    def __init__(self):
        # Render WeChat reply XML from the ./templates directory next to
        # this module (expects a reply_text template to exist there).
        self.app_root = os.path.dirname(__file__)
        self.templates_root = os.path.join(self.app_root, 'templates')
        self.render = web.template.render(self.templates_root)

    def _reply(self, from_user, to_user, text):
        # Render a text reply stamped with the current epoch time.
        return self.render.reply_text(from_user, to_user, int(time.time()), text)

    def _cookie_opener(self, install=False):
        # Build a cookie-aware opener with a desktop UA; optionally install
        # it globally (the original installed it in some branches only).
        jar = cookielib.CookieJar()
        opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(jar))
        opener.addheaders = [self._UA]
        if install:
            urllib2.install_opener(opener)
        return opener

    def _fetch(self, url):
        # GET a page with the shared scraping headers and return its body.
        req = urllib2.Request(url, headers=self._HEADERS)
        return urllib2.urlopen(req).read()

    def GET(self):
        """WeChat server-verification handshake.

        Per the WeChat docs: lexicographically sort (token, timestamp,
        nonce), SHA-1 the concatenation, and echo ``echostr`` back iff the
        digest equals the supplied ``signature``.  Returns None (HTTP empty
        body) for requests that fail verification.
        """
        data = web.input()
        signature = data.signature
        timestamp = data.timestamp
        nonce = data.nonce
        echostr = data.echostr
        token = "weixin9047"  # must match the token set in the WeChat console
        # Renamed from `list`, which shadowed the builtin.
        params = sorted([token, timestamp, nonce])
        sha1 = hashlib.sha1()
        sha1.update(''.join(params))  # sequential updates == hashing the join
        if sha1.hexdigest() == signature:
            return echostr

    def POST(self):
        """Parse a pushed text message and dispatch on its content."""
        str_xml = web.data()  # raw XML body pushed by the WeChat server
        xml = etree.fromstring(str_xml)
        content = xml.find("Content").text      # the user's message text
        msgType = xml.find("MsgType").text      # parsed but unused downstream
        fromUser = xml.find("FromUserName").text
        toUser = xml.find("ToUserName").text
        # Dispatch order preserved from the original if/elif chain.
        if content == u"天氣":
            return self._weather(fromUser, toUser)
        elif content[0:2] == u"電影":
            return self._movie(fromUser, toUser, content[2:])
        elif u"段子" in content:
            return self._joke(fromUser, toUser)
        elif content[0:2] == u"開源":
            return self._oschina(fromUser, toUser, content[2:])
        elif content[0:2] == u"快遞":
            return self._express(fromUser, toUser, content[2:])
        elif content == u"微博熱點":
            return self._weibo_hot(fromUser, toUser)
        elif content == u"知乎信息":
            return self._zhihu(fromUser, toUser)
        elif u"鐘志遠" in content:
            return self._reply(fromUser, toUser, u"你想找全世界最帥的人干嘛?如果你是妹子,請加微信18362983803!漢子繞道!")
        elif u"使用" in content:
            return self._reply(fromUser, toUser, u"搜電影:電影+電影名,最近天氣:天氣,微博熱門:微博熱點,快遞查詢:快遞+單號,看笑話:段子,發(fā)送動彈到開源中國:開源+內(nèi)容")
        else:
            return self._chat(fromUser, toUser, content)

    def _weather(self, from_user, to_user):
        # Scrape ip138's mobile page for Nanjing's five-day forecast.
        html = self._fetch("http://m.ip138.com/21/nanjing/tianqi/")
        rex = r'(?<=img src="/image/s[0-9].gif" alt=").{1,6}(?=" />)'
        rexx = r'(?<=div class="temperature">).{5,15}(?=</div>)'
        descs = re.findall(rex, html)    # weather descriptions (alt text)
        temps = re.findall(rexx, html)   # temperature ranges
        str_wether = ""
        for (temp, desc) in zip(temps, descs):
            str_wether = str_wether + desc + " " + temp + "\n"
        return self._reply(from_user, to_user, "最近五天天氣:\n" + str_wether)

    def _movie(self, from_user, to_user, name):
        # Search wangpansou for Baidu-pan share links matching the name.
        keyword = urllib.quote(name.encode("utf-8"))
        html = self._fetch("http://www.wangpansou.cn/s.php?q=" + keyword)
        rex = r'https?://pan.baidu.com.*\?uk=[0-9]{10}.*[\d+?]"'
        string = u""
        for link in re.findall(rex, html):
            string = string + link + "\n"
        return self._reply(from_user, to_user, u"以下是電影鏈接:\n" + string)

    def _joke(self, from_user, to_user):
        # Pull jokes from qiushibaike's front and "hot" pages, shuffle,
        # and reply with one of them.
        html_front = self._fetch("http://www.qiushibaike.com/")
        html_hot = self._fetch("http://www.qiushibaike.com/hot/")
        rex = r'(?<=div class="content">).*?(?=<!--)'
        jokes = re.findall(rex, html_front, re.S)
        jokes.extend(re.findall(rex, html_hot, re.S))
        random.shuffle(jokes)
        return self._reply(from_user, to_user, jokes[0].replace('<br/>', ''))

    def _oschina(self, from_user, to_user, msg_text):
        # Log in to oschina.net and publish the text as a "tweet" (動彈).
        # SECURITY: account, password and user_code are hard-coded below --
        # move them to configuration and never commit real secrets.
        login_url = "https://www.oschina.net/action/user/hash_login"
        pub_url = "http://www.oschina.net/action/tweet/pub"
        username = "904727147@qq.com"
        passw = ""
        password = hashlib.sha1(passw).hexdigest()  # site expects SHA-1 of the password
        opener = self._cookie_opener(install=True)
        opener.open(login_url, urllib.urlencode({'email': username, 'pwd': password}))
        post = {'user_code': "lPFz26r3ZIa1e3KyIWlzPNpJlaEmZqyh6dAWAotd",
                'user': "2391943",
                'msg': msg_text.encode("utf-8")}
        urllib2.urlopen(pub_url, urllib.urlencode(post)).read()
        return self._reply(from_user, to_user, u"發(fā)送到開源中國動彈成功!")

    def _express(self, from_user, to_user, keyword):
        # Resolve the carrier from the tracking number, then query kuaidi100.
        self._cookie_opener(install=True)
        html = urllib2.urlopen(
            "http://www.kuaidi100.com/autonumber/autoComNum?text=" + keyword).read()
        jo = json.loads(html)
        auto = jo.get("auto")
        # FIX: the original indexed jo["auto"][0] unconditionally and raised
        # IndexError on an empty carrier list before its None check could run.
        if not auto or auto[0].get('comCode') is None:
            return self._reply(from_user, to_user, u"請檢查你的定單號!")
        typ = auto[0]['comCode']
        html_end = urllib2.urlopen(
            "http://www.kuaidi100.com/query?type=" + typ + "&postid=" + keyword).read()
        jo_end = json.loads(html_end)
        if jo_end["status"] == "201":
            return self._reply(from_user, to_user, u"訂單號輸入有誤,請重新輸入!")
        string = u""
        for item in jo_end["data"]:
            string = string + item["time"] + item["context"] + "\n"
        return self._reply(from_user, to_user, string)

    def _weibo_hot(self, from_user, to_user):
        # Scrape weibo.cn's public page for current hot topics.
        html = self._fetch("http://weibo.cn/pub/?tf=5_005").decode("utf-8")
        rex = r'(?<=div class="c"><a href=").{60,79}(?=</a>)'
        string = u""
        for hit in re.findall(rex, html):
            string = string + hit.replace('>', '\n') + "\n"
        return self._reply(from_user, to_user, string.replace('"', ''))

    def _zhihu(self, from_user, to_user):
        # Log in to zhihu and report the new-notification counters.
        # SECURITY: phone number / password are hard-coded, and _xsrf is a
        # session-bound token that goes stale -- fetch it dynamically.
        username = '18362983803'
        password = ''
        _xsrf = '558c1b60725377c5810ae2484b26781e'
        opener = self._cookie_opener()  # original did not install this one globally
        data = urllib.urlencode({"phone_num": username, "password": password, '_xsrf': _xsrf})
        opener.open(r'https://www.zhihu.com/login/phone_num', data)
        html = opener.open('https://www.zhihu.com/noti7/new?r=1454793308655').read()
        counts = json.loads(html)[1]  # [comments, followers, upvotes]
        string = ("增長了:" + str(counts[0]) + "個評論"
                  + str(counts[1]) + "個粉絲" + str(counts[2]) + "個贊同")
        return self._reply(from_user, to_user, string)

    def _chat(self, from_user, to_user, content):
        # Fall back to the xiaohuangji chatterbot for any unmatched input.
        opener = self._cookie_opener()
        quoted = urllib.quote(content.encode("utf-8"))
        try:
            data = urllib.urlencode({"para": quoted})
            html = opener.open(r'http://www.xiaohuangji.com/ajax.php', data).read()
            return self._reply(from_user, to_user, html + "\n----[回復[使用]]")
        except Exception:
            return self._reply(from_user, to_user, u"我不想理你了~")
以上就是本文的全部內(nèi)容,希望對大家的學習有所幫助。
相關文章
Python學習之.iloc與.loc的區別、聯系和用法
loc和iloc都是pandas工具中定位某一行的函數,下面這篇文章主要給大家介紹了關于Python學習之.iloc與.loc的區別、聯系和用法的相關資料,文中通過實例代碼介紹的非常詳細,需要的朋友可以參考下2022-05-05
基于Python+Flask設計實現AI智能天氣助手系統
這篇文章主要為大家詳細介紹了如何基于Python和Flask設計實現一個AI智能天氣助手系統,文中的示例代碼講解詳細,感興趣的小伙伴可以參考一下2025-03-03
淺談keras使用預訓練模型vgg16分類,損失和準確度不變
這篇文章主要介紹了淺談keras使用預訓練模型vgg16分類,損失和準確度不變,具有很好的參考價值,希望對大家有所幫助。一起跟隨小編過來看看吧2020-07-07

