
사용자:티첼/RFC.py


#!/usr/bin/python
# -*-  coding: utf-8  -*-
import wikipedia, pagegenerators
import time
import re, gc, threading
 
site = wikipedia.getSite()
def main():
# To run this endlessly on the server, use the following instead:
#  while 1<2:
#    update([u"의견 요청"],u"틀:의견 요청 목록",u"이 문서는 봇에 의해 갱신되고 있습니다. [[분류:위키백과 틀|{{PAGENAME}}]]").start()
#    gc.collect()
#    time.sleep(300)
  # Arguments: the source template (u"의견 요청", "Requests for comment"),
  # the list page to update (u"틀:의견 요청 목록", "Template:Requests for comment list"),
  # and the footer text ("This page is updated by a bot." plus the template category).
  update([u"의견 요청"],u"틀:의견 요청 목록",u"이 문서는 봇에 의해 갱신되고 있습니다. [[분류:위키백과 틀|{{PAGENAME}}]]").start()
  gc.collect()
 
class update(threading.Thread):
  def __init__(self, templates, post_template, bottomtxt):
    threading.Thread.__init__(self)
    self.templates = templates          # source template name(s), e.g. u"의견 요청"
    self.post_template = post_template  # page that receives the generated list
    self.bottomtxt = bottomtxt          # footer text appended below the list
 
  def run(self):
    pages=[]
    message=''
    dic={}
    templates = self.templates
    bottomtxt ='%s' % self.bottomtxt
    post_template = self.post_template
    # Build a regex alternation matching any of the source template names.
    if len(templates)==1:
      regex ='%s' % templates[0]
    else:
      regex = '(' + '|'.join(templates) + ')'
    # Collect every page that transcludes one of the source templates.
    for template in templates:
      pagegen = pagegenerators.ReferringPageGenerator(wikipedia.Page(site, u'틀:%s' % template), onlyTemplateInclusion = True)
      for page in pagegen:
        pages.append(page)
    pages = sorted(set(pages))
    message =u''
    if pages:
      for page in pages:
        Time,line = pageparse(page,regex)
        if Time == '':
          # Skip pages whose request could not be parsed or has expired.
          continue
        dic[Time]=line
      # Newest requests first.
      for key in sorted(dic.keys(), reverse=True):
        message +=u'%s' % dic[key]
    else:
      message = u'* 목록에 토론이 없습니다.'  # "There are no discussions in the list."
    pagetext= u"'''다음 토론들이 공동체 수준의 관심을 요청하고 있습니다. '''\n----\n<onlyinclude>%s\n</onlyinclude>\n%s" % (message,bottomtxt)
    templatepage = wikipedia.Page(site,'%s' % post_template )
    templatepage.put(pagetext, comment=u'수정, 현재 %s 개의 토론이 있습니다.' % len(pages))
    wikipedia.output('Update, %s current discussions\n Sleeping for 5 minutes' % len(pages))
    gc.collect() 
def pageparse(page,regex):
  # Extract the 문단 (section), 사유 (reason) and 시각 (timestamp) fields from
  # the request template on the given page and return (timestamp, formatted
  # list line); stale or unparsable requests return ('','').
  wikipedia.output(page.title())
  try:
    message =''
    text    = page.get()
    Time    = time.time()
    reason  = ''
    section = ''
    dic     = {}
    # Template fields are laid out as "문단=...!!", "사유=...!!" and "시각=...}}";
    # pull out each value.
    g = re.search(u"문단(|\s)=.*?\!\!",text,re.I)
    g = g.group(0).split('=')[1]
    g = g.split('!')[0]
    dic['section'] = g.strip()
    g2 = re.search(u"사유(|\s)=.*?\!\!",text,re.I)
    g2 = g2.group(0).split('=')[1]
    g2 = g2.split('!')[0]
    dic['reason'] = g2.strip()
    g3 = re.search(u"시각(|\s)=.*?\}\}",text,re.I)
    g3 = g3.group(0).split('=')[1]
    g3 = g3.split('}')[0]
    dic['time'] = g3.strip()
    if 'reason' in dic:
      reason = dic['reason']
    if 'time' in dic:
      Time = dic['time']
      # Timestamps look like "2008년 1월 30일 (수) 23:01 (KST)": strip the
      # year/month/day suffixes, drop the weekday, split hours and minutes,
      # then convert to seconds since the epoch.
      st = Time.split()
      Time = st[0][:-1]+' '+st[1][:-1]+' '+st[2][:-1]+' '+st[4][:2]+' '+st[4][3:]
      edittime = time.strptime(Time, u"%Y %m %d %H %M")
      Time = time.mktime(edittime)
    if 'section' in dic:
      section = dic['section']
      # Build a section anchor: spaces become underscores, brackets are dropped.
      section = re.sub(' ','_',section)
      section = re.sub('\[','',section)
      section = re.sub('\]','',section)
      link = u'\n* [[%s#%s|%s]] ' % (page.title(),section,page.title())
    else:
      link = u'\n* [[%s]] ' % page.title()
    # Requests older than 30 days (2592000 seconds) are removed from the page.
    if Time < (time.time()- 2592000):
      text = re.sub('\{\{'+regex+'(.*?)\}\}','',text)
      page.put(text,comment= u'오래된 의견 요청 제거')  # "Removing stale request for comment"
      gc.collect()
      return '',''
    message =u'%s%s' % (link,reason)
    message = re.sub('&#124;','|',message)  # restore escaped pipe characters
    gc.collect()
    return Time, message
  except Exception:
    # Parsing failed: replace the broken template call with {{의견 요청 오류}}
    # ("request for comment error") so the page is flagged for manual review.
    text = page.get()
    text = re.sub('\{\{'+regex+'(.*?)\}\}',u"{{의견 요청 오류}}",text)
    page.put(text,u"의견 요청 오류")
    gc.collect()
    return '',''
 
if __name__ == '__main__':
  try:
    main()
  finally:
    wikipedia.stopme()
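
Below is a minimal standalone sketch of the timestamp handling used in pageparse() above, for readers who want to test that step in isolation. It assumes the same 시각 field format as the script ("2008년 1월 30일 (수) 23:01 (KST)") and the same 30-day expiry window; the helper name parse_rfc_timestamp is illustrative only and is not part of the bot.

# -*- coding: utf-8 -*-
# Standalone sketch of the timestamp conversion performed in pageparse().
import time

def parse_rfc_timestamp(value):
    # u"2008년 1월 30일 (수) 23:01 (KST)" -> seconds since the epoch (local time)
    st = value.split()
    cleaned = '%s %s %s %s %s' % (st[0][:-1], st[1][:-1], st[2][:-1],
                                  st[4][:2], st[4][3:])
    return time.mktime(time.strptime(cleaned, '%Y %m %d %H %M'))

if __name__ == '__main__':
    stamp = parse_rfc_timestamp(u'2008년 1월 30일 (수) 23:01 (KST)')
    # The bot treats a request as stale once it is older than 30 days (2592000 s).
    print(stamp < time.time() - 2592000)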

