【Commonly Used Python Code Snippets】

1. Commonly used Python libraries and how to install them

Commonly used libraries

import numpy as np
import pandas as pd
import jieba  
import collections
import re
from wordcloud import WordCloud
import matplotlib.pyplot as plt
from PIL import Image
import sys
import os
import json
import shutil
from snownlp import SnowNLP
# Ignore warnings
import warnings
warnings.filterwarnings("ignore")

Installing from a mirror:

pip install pymysql -i https://pypi.tuna.tsinghua.edu.cn/simple  # Tsinghua mirror
pip install -i http://mirrors.aliyun.com/pypi/simple/ --trusted-host mirrors.aliyun.com pyperclip  # Aliyun mirror
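To avoid retyping the mirror URL every time, pip can also store it in its config as a one-off setup (requires pip 10+):

pip config set global.index-url https://pypi.tuna.tsinghua.edu.cn/simple  # all later installs use the Tsinghua mirror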

2. Make every expression in a cell produce output

from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = 'all'
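With this setting, Jupyter/IPython echoes every expression in a cell rather than only the last one. A tiny cell to verify (run in a notebook):

x = 5
x + 1   # displayed
x * 2   # also displayed; normally only this last expression would be shown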

3. Creating arrays and random arrays

1. The array constructor

np.array(object, dtype=None)

2. Common ndarray constructors

# All-zeros array:
np.zeros(shape, dtype) / np.zeros_like(arr)
# All-ones array:
np.ones(shape, dtype) / np.ones_like(arr)
# Constant-filled array:
np.full(shape, fill_value) / np.full_like(arr, fill_value)
# Identity matrix:
np.eye(N)

3. Creating random ndarrays

# Random integer array:
np.random.randint(low, high=None, size=None, dtype)
# Uniform distribution on [0, 1):
np.random.rand(d0, d1, ...)
# Standard normal distribution:
np.random.randn()
# Normal distribution:
np.random.normal(loc, scale, size)
# Set the random seed:
np.random.seed()
# Evenly spaced sequences (arange is half-open [start, stop)); retstep=True also returns the step:
np.arange(start, stop, step) / np.linspace(start, stop, num, retstep=True)
# Array concatenation (axis=0 stacks vertically, axis=1 horizontally):
np.concatenate([x, y], axis=1)
# Generate a sequence and reshape it:
np.arange(2, 12).reshape([2, 5])
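A small runnable sketch of several of the calls above:

import numpy as np

np.random.seed(0)                          # make the random output reproducible
a = np.zeros((2, 3), dtype=int)            # 2x3 all-zero integer array
b = np.random.randint(0, 10, size=(2, 3))  # random ints in [0, 10)
arr, step = np.linspace(0, 1, 5, retstep=True)  # retstep=True also returns the spacing
print(np.concatenate([a, b], axis=1).shape)     # (2, 6): joined horizontally
print(arr, step)                                # [0. 0.25 0.5 0.75 1.] 0.25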

4. Reading local data

grade = pd.read_csv("text.txt", encoding='utf-8', sep='\t')

Fixing a common read error

Error message:
UnicodeDecodeError: 'utf-8' codec can't decode byte 0xcc in position 0: invalid continuation byte

Fix:
A txt file newly created on Windows, or a csv saved by WPS, defaults to ANSI encoding. Convert the file to UTF-8: (1) right-click the csv file and choose Open With - Notepad; (2) click File - Save As; (3) choose UTF-8 as the encoding and click Save. The file is now UTF-8 encoded.

# Also declare the source-file encoding at the top of the script: # -*- coding: utf-8 -*-
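Alternatively, skip the re-saving step and read the file with the Windows ANSI code page directly (on a Chinese-locale Windows this is usually GBK); a fallback sketch:

try:
    grade = pd.read_csv("text.txt", encoding='utf-8', sep='\t')
except UnicodeDecodeError:
    grade = pd.read_csv("text.txt", encoding='gbk', sep='\t')  # Windows ANSI (Chinese locale)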

5. Text data processing (pandas string methods)

# Replace the surname 张 with 薛 in the 姓名 (name) column
grade.姓名.map(lambda x: x.replace('张', '薛'))   # option 1
grade.姓名.str.replace('张', '薛')                # option 2
# Count the lengths of the names
grade.姓名.map(lambda x: len(x)).value_counts()   # option 1
grade.姓名.str.len().value_counts()               # option 2
# get: first character of each name
grade.姓名.str.get(0)
# cat: concatenate the strings
grade.姓名.str.cat(sep='|')
# repeat: repeat each element N times
grade.姓名.str.repeat(3)
# join: insert a character between the characters of each string
grade.姓名.str.join(' ')
# Split strings into columns (the delimiter depends on the data; a space is shown here)
wine.机构描述.str.split(' ', expand=True)
# Select organizations containing "国内研发中心"
wine.细分类型.str.count('国内研发中心').value_counts()
wine[wine.细分类型.str.count('国内研发中心') > 0]
# contains: whether the string contains a substring
wine.细分类型.str.contains('国内')
wine[wine.细分类型.str.contains('国内')]   # view the matching rows
# startswith: whether the string starts with a substring
wine.细分类型.str.startswith('国内')
# endswith: whether the string ends with a substring
wine.细分类型.str.endswith('国内')
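A minimal self-contained demo of a few of these methods (toy data with made-up names):

import pandas as pd

grade = pd.DataFrame({'姓名': ['张三', '张四', '李五']})
print(grade.姓名.str.replace('张', '薛'))    # 薛三 / 薛四 / 李五
print(grade.姓名.str.len().value_counts())   # every name has length 2
print(grade.姓名.str.get(0))                 # 张 / 张 / 李
print(grade.姓名.str.cat(sep='|'))           # 张三|张四|李五
print(grade.姓名.str.contains('张'))         # True / True / False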

6. Web scraping: JD.com product reviews

import asyncio
import aiohttp
import re
import logging
import datetime

logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s: %(message)s')
start = datetime.datetime.now()

class Spider(object):
    def __init__(self):
        self.semaphore = asyncio.Semaphore(6)
        # Request headers that mimic a real browser
        self.header = {
            "Host": "club.jd.com",
            "Cookie": "__jdu=16564751429841859217559; areaId=13; mt_xid=V2_52007VwMVVlpdVF4fTBpUDGIDG1ZVW1JcG00RbAU1VkVSVVFSRkxPEVUZYlEWB0FRVwkfVR5fAWUBQVReWFpaGXkaXQZkHxNUQVlVSx9MElgFbAMWYl9oUWocSB9UAGIzEVVdXg%3D%3D; shshshfpa=1ad54bbe-626e-eb78-9f82-81da8e037794-1657090277; shshshfpb=wRkD4Lozs4iCbBk0HwllIOg; ipLoc-djd=13-1007-37917-37974; jwotest_product=99; TrackID=1dbQc8XQtoflTmIq1lHWuR3Ljnyykouv9N1NWKI4o1jCi8-n6AZR5s3Em3DMTrb1CFni4BcD7qniNqlgZ4dKNpzK7MNS56ALxKVBb7Vpl5ZhOcq9O1Lgt7uW8MxJWjJt6fesAJHHnx3ZRyFMDTiA04g; pinId=b8RdRIWtEmZUbs3RS21PwLV9-x-f3wj7; pin=jd_6b8e3dfac7308; unick=u_fh8kexmt85fz; ceshi3.com=103; _tp=q0EtGd3m4oxnMVbMXATMbYCLw4lGL8fefHJ0h7FeoSA%3D; _pst=jd_6b8e3dfac7308; cn=15; PCSYCityID=CN_370000_370200_0; unpl=JF8EALJnNSttWB5dVkwDHRsRS1VSW14ASkRRb24HBA1YSgQMHlEdFBJ7XlVdXhRKER9uYRRVVVNIUA4fBysSEXteU11bD00VB2xXXAQDGhUQR09SWEBJJVlQXl4ITxUBbWQ1ZF5Ye1QEKwIcFhdOW1BeWAtDHgZvbgFcXlhNUAETMisVIHttUV9dCUsRM25XBGQfDBdSBxsBGhBdS1pQWVgOTxcGbG8MUV1RT1wGGwQfFhh7XGRd; __jdv=76161171|haosou-search|t_262767352_haosousearch|cpc|5512153332_0_1d8bf07801974290bc183aa10a84b773|1657157153383; thor=EDD3178B5C861682395D329ACC8F414499FDCE347DCA61326B0A4DFA763C60FBD160FE66214CD1DB097B75D4FAAE4A23F0EC652A4C01DB85EF55C982D34C1DDD14B2DAD259E4331E99ACBF4EFAAF1EBAFD35E06083321C46626A1188EC98FB9B4B97220203D21C5634EA48D79949A96A48A4C822E07BA409B89A2191FF5A65642C778C335CBF24DCD34F9344A0D67DCD22036259AEEC0939E5A946B7FCD46C4E; __jda=122270672.16564751429841859217559.1656475143.1657093734.1657157153.4; __jdc=122270672; shshshfp=61cee0a2f4438ee8a044810ef92a5f81; ip_cityCode=1007; 3AB9D23F7A4B3C9B=IVX6FJJQQ4T5KH5CHZEJ325JAK6NHVGK2PCWR7GZDY4HJV6777QVVKUFNNR2VRPLUNO7QJ4A2C4YXAVDFE3N4UOJJM; JSESSIONID=C5F45D90F1894444C24EDAFB95DD590C.s1",
            "Connection": "keep-alive",
            "Referer": "https://item.jd.com/100039721172.html",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/103.0.5060.66 Safari/537.36 Edg/103.0.1264.44"

        }

    async def scrape(self, url):
        async with self.semaphore:
            session = aiohttp.ClientSession(headers=self.header)
            response = await session.get(url)
            result = await response.text()
            await session.close()
            return result

    async def scrape_page(self, page):
        url = f'https://club.jd.com/comment/productPageComments.action?callback=fetchJSON_comment98&productId=100039721172&score=0&sortType=5&page={page}&pageSize=10&isShadowSku=0&fold=1'
        text = await self.scrape(url)
        await self.parse(text)

    async def parse(self, text):
        content = re.findall('"guid":".*?","content":"(.*?)"', text)
        with open('C1.txt', 'a+', encoding='utf-8') as f:
            for con in content:
                f.write(con + '\n')
                logging.info(con)

    def main(self):
        # Scrape 100 pages of reviews
        scrape_index_tasks = [asyncio.ensure_future(self.scrape_page(page)) for page in range(0, 100)]
        loop = asyncio.get_event_loop()
        tasks = asyncio.gather(*scrape_index_tasks)
        loop.run_until_complete(tasks)

if __name__ == '__main__':
    spider = Spider()
    spider.main()
    delta = (datetime.datetime.now() - start).total_seconds()
    print("用时:{:.3f}s".format(delta))

7. jieba word segmentation + word cloud

import jieba
import collections
import re
from wordcloud import WordCloud
import matplotlib.pyplot as plt

with open('C1.txt', encoding='utf-8') as f:
    data = f.read()

# Preprocess the text: strip useless characters and keep only the Chinese
new_data = re.findall('[\u4e00-\u9fa5]+', data, re.S)
new_data = "/".join(new_data)

# Segment the text (exact mode, matching the variable name; cut_all=True would be full mode)
seg_list_exact = jieba.cut(new_data, cut_all=False)

result_list = []
with open('stop_words.txt', encoding='utf-8') as f:
    con = f.readlines()
    stop_words = set()
    for i in con:
        i = i.replace("n", "")   # 去掉读取每一行数据的n
        stop_words.add(i)

for word in seg_list_exact:
    # drop stop words and single-character tokens
    if word not in stop_words and len(word) > 1:
        result_list.append(word)
print(result_list)

# Count word frequencies after filtering
word_counts = collections.Counter(result_list)

# Draw the word cloud
my_cloud = WordCloud(
    background_color='white',  # background color (default is black)
    width=800, height=550,
    font_path='simhei.ttf',    # a font that can render Chinese
    max_font_size=112,         # largest font size
    min_font_size=12,          # smallest font size
    random_state=80            # fixes the layout/color scheme for reproducibility
).generate_from_frequencies(word_counts)

# Show the generated word cloud
plt.imshow(my_cloud, interpolation='bilinear')
# Hide the axes
plt.axis('off')
plt.show()
# Show the words and their frequencies
word_counts
# Counter supports the dict interface for keys and values
word_counts.keys()
word_counts.values()
# most_common(n) returns the n most frequent (word, count) pairs; with no argument it returns all
word_counts.most_common(100)

Adding a user-defined dictionary

Type %TEMP% in cmd to find and clear jieba's cache, then add user-defined entries to jieba's bundled dictionary, e.g. E:\Anaconda\Lib\site-packages\jieba\dict
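Rather than editing the bundled dictionary in place, jieba can also load a user dictionary at runtime (the file name user_dict.txt is just an example; the format is one entry per line: word [frequency] [POS tag]):

import jieba

jieba.load_userdict('user_dict.txt')       # e.g. a line like: 国内研发中心 10 n
jieba.add_word('国内研发中心', freq=10)    # or add single entries in code
jieba.suggest_freq('研发中心', tune=True)  # tune frequency so this word is kept together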

8. Exporting the results to Excel

import operator
import xlwt  # library for writing legacy .xls files (imported here but not used below)
import pandas as pd


# Sort the dict by value in descending order
x = dict(word_counts)
sorted_y = sorted(x.items(), key=operator.itemgetter(1), reverse=True)
sorted_y

word = pd.DataFrame(sorted_y, columns=["关键词", "词频"])
word
# Export the keywords and their frequencies to Excel
def export_excel(word):
    # Keep the column order explicit
    order = ["关键词", "词频"]
    pf = word[order]
    # Replace empty cells
    pf = pf.fillna(' ')
    # Write out; using ExcelWriter as a context manager saves the file on exit
    # (to_excel(encoding=...) and ExcelWriter.save() were removed in recent pandas)
    with pd.ExcelWriter('xiyiji.xlsx') as writer:
        pf.to_excel(writer, index=False)

if __name__ == '__main__':
    # Export the finished list as an Excel sheet
    export_excel(word)

9. Keyword extraction + POS tagging

import jieba.analyse as anls  # keyword extraction

'''Extract keywords'''

sent = open("test_list.txt", 'r', encoding='UTF-8').read()

# Keyword extraction based on TF-IDF
print("TF-IDF keywords:")
for x, w in anls.extract_tags(sent, topK=20, withWeight=True):
    print('%s %s' % (x, w))

# Keyword extraction based on TextRank
print("TextRank keywords:")
for x, w in anls.textrank(sent, withWeight=True):
    print('%s %s' % (x, w))

import jieba.posseg as pseg  # POS tagging
sent = "他在北京大学读书"
words = pseg.cut(sent)
for word, flag in words:
    print("{0} {1}".format(word, flag))

10. Reading Excel content with the xlrd module

import xlrd
 
'''
Reading the contents of an Excel file with the xlrd module
'''
# 1. Open the Excel file
workbook = xlrd.open_workbook('test.xls')

print(workbook.sheet_names())  # all sheet names, e.g. ['基本信息', '附加信息']

# 2. Get a sheet from the opened workbook
sheet1 = workbook.sheet_by_index(0)          # by index (0-based)
sheet2 = workbook.sheet_by_name('附加信息')  # by sheet name

# 3. Sheet dimensions and name
nrows = sheet1.nrows  # number of rows
ncols = sheet1.ncols  # number of columns
name = sheet1.name    # sheet name

# 4. Value of a single cell
val = sheet1.cell(1, 2).value  # row 1, column 2 (0-based: the 2nd row, 3rd column)

print(val)  # 一年级

# 5. Values of a whole row or column
row_val = sheet1.row_values(1)     # all cells of the 2nd row
col_val = sheet1.col_values(0, 1)  # cells of the 1st column, from the 2nd row down

print(row_val)  # ['张三', 12.0, '一年级']
print(col_val)  # ['张三', '李四', '王五']

# 6. Data type of a cell's content (renamed from "type" to avoid shadowing the builtin)
'''ctype: 0 empty, 1 string, 2 number, 3 date, 4 boolean, 5 error'''
cell_type = sheet1.cell(1, 2).ctype

print(cell_type)  # 1
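Note that xlrd 2.0+ only reads legacy .xls files; for .xlsx use openpyxl, or let pandas pick the right engine. A minimal pandas equivalent (sketch):

import pandas as pd

df = pd.read_excel('test.xls', sheet_name=0)  # first sheet; first row becomes the header
print(df.shape)        # (rows, columns), excluding the header row
print(df.iloc[0, 2])   # first data row, third column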

11. Splitting each line of text into multiple columns and exporting to Excel

# Read the data
import warnings
warnings.filterwarnings("ignore")
import pandas as pd
import jieba
import numpy as np
df_news = pd.read_table('lg_test.txt', names=['content'], encoding='utf-8')
df_news = df_news.dropna()  # drop rows with missing values
df_news.head()

df_news['content'] = df_news['content'].str.replace(r'\d+', '', regex=True)  # strip digits
df_news['content']

df_news['content'] = df_news['content'].str.replace('[A-Za-z]', '', regex=True)  # strip ASCII letters ([A-z] would also match some punctuation)

content = df_news.content.values.tolist()  # one list element per article
content[0]

stopwords = pd.read_csv('stop_words.txt', header=None)
stopwords.head()

stopwords = stopwords[0].tolist()
stopwords.append('\r\n')
stopwords
content_S = []
for line in content:
    current_segment = jieba.lcut(line)  # segment each article; returns a list of words
    current_segment = [segment for segment in current_segment if segment not in stopwords]  # drop stop words
    segments = " ".join(current_segment)
    content_S.append(segments)  # keep the segmented result

content = pd.DataFrame(content_S,columns=['text'])
content

df2 = pd.DataFrame((x.split(' ') for x in content['text']))
df2.head()

# Add a row-id column 'f' (just the row index) for the unpivot step in the next section
df2['f'] = df2.index
df2

df2.to_excel('ans.xlsx', index=False)

12. Converting data between wide and long form

sample_df = pd.read_table('ans.txt',encoding='utf-8')
sample_df

# Append a '.0' suffix to Supplier columns that end in a non-digit, so every
# column matches the 'stub.suffix' pattern expected by wide_to_long below
sample_df.columns = np.where(sample_df.columns.str.match(r'^Supp.*\D+$'),
                             sample_df.columns + '.0',
                             sample_df.columns)

unpivotted_df = pd.wide_to_long(sample_df,
                                stubnames=['Supplier'],
                                i=['f'],
                                j='idx',
                                suffix='.+',
                                sep='.')

df6 = unpivotted_df.dropna(axis=0, how='any')
df6

df6.to_excel('unpivotted_df.xlsx')  # save
# s_year.to_excel('s_year.xlsx', merge_cells=False)  # merge_cells=False keeps index cells unmerged

# Pivot back to wide form: one numbered column group per idx value,
# rows ordered by their original f values ('f' is an index level after wide_to_long)
out = unpivotted_df.unstack().sort_index(level=1, axis=1).reindex(
    unpivotted_df.index.get_level_values('f').unique())
out.columns = out.columns.map('{0[0]}.{0[1]}'.format)
out.reset_index()
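Because ans.txt itself is not shown, here is a self-contained toy version of the same wide_to_long step:

import pandas as pd

wide = pd.DataFrame({'f': [0, 1],
                     'Supplier.0': ['A', 'C'],
                     'Supplier.1': ['B', None]})
long = pd.wide_to_long(wide, stubnames=['Supplier'],
                       i=['f'], j='idx', suffix='.+', sep='.')
print(long.dropna())   # one (f, idx) row per non-empty Supplier cell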

13. Fixing the "This event loop is already running" error

import asyncio
import nest_asyncio
nest_asyncio.apply()  # allows run_until_complete inside an already-running loop (e.g. Jupyter)

async def func():
    print("start")
    response = await asyncio.sleep(2)
    print("done", response)

loop = asyncio.get_event_loop()
loop.run_until_complete(func())
