日韩性视频-久久久蜜桃-www中文字幕-在线中文字幕av-亚洲欧美一区二区三区四区-撸久久-香蕉视频一区-久久无码精品丰满人妻-国产高潮av-激情福利社-日韩av网址大全-国产精品久久999-日本五十路在线-性欧美在线-久久99精品波多结衣一区-男女午夜免费视频-黑人极品ⅴideos精品欧美棵-人人妻人人澡人人爽精品欧美一区-日韩一区在线看-欧美a级在线免费观看

歡迎訪問 生活随笔!

生活随笔

當前位置: 首頁 > 编程资源 > 编程问答 >内容正文

编程问答

2015美国数学建模a代码c语言,美国数学建模比赛2020 MCM C题代码详解

發布時間:2024/9/19 编程问答 28 豆豆
生活随笔 收集整理的這篇文章主要介紹了 2015美国数学建模a代码c语言,美国数学建模比赛2020 MCM C题代码详解 小編覺得挺不錯的,現在分享給大家,幫大家做個參考.

# #!-*- coding:utf-8 -*-

import pandas as pd

import numpy as np

import xlrd

import vaderSentiment

from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer

from sklearn.preprocessing import StandardScaler

from openpyxl import load_workbook # 寫入excel

from wordcloud import WordCloud as wc

import jieba # 結巴分詞

import matplotlib.pyplot as plt # 繪圖

from collections import defaultdict # 字典,用于詞頻統計

from PIL import Image # 打開圖片,用于詞云背景層

import cv2

from pyecharts.charts import Bar

import datetime

from pandas import Series

# #######################################################################################
# 1. Load the raw review data and preprocess it.
# data = pd.read_csv("C:\\Users\\thous\\PycharmProjects\\untitled4\\2018Q3.csv")
hair_dryer = pd.read_csv("C:\\Users\\thous\\PycharmProjects\\untitled4\\hair_dryer.csv")

# Drop duplicate rows.
print('hair_dryer刪除重復數據前大小:',hair_dryer.shape)
# BUG FIX: drop_duplicates() returns a NEW DataFrame; the original call discarded
# the result, so no duplicates were ever removed and the "after" shape was wrong.
# Re-assign, and reset the index so the later positional join with the
# sentiment-score frame stays row-aligned.
hair_dryer = hair_dryer.drop_duplicates().reset_index(drop=True)
print('hair_dryer刪除重復數據后大小:',hair_dryer.shape)

# Extract the review text AFTER deduplication so its rows match hair_dryer.
hair_dryer_review = hair_dryer['review_body']

# Check for missing values; print the per-column counts so the check is visible
# (the original discarded the result of isnull().sum()).
print(hair_dryer.isnull().sum())

# #######################################################################################
# 2. Score every review with VADER sentiment analysis.
analyzer = SentimentIntensityAnalyzer()

# polarity_scores returns a dict (neg/neu/pos/compound) per review.
hair_dryer_sentiments = list(map(analyzer.polarity_scores, hair_dryer_review))

# float() argument must be a string or a number, not 'dict'
# 'float' object has no attribute 'split' — convert floats to another format first.
# Feature-format conversion examples kept for reference:
# data['term'] = data['term'].str.replace(' months', '').astype('float')
# col = data.select_dtypes(include=['int64', 'float64']).columns
# col = col.drop('loan_status')  # drop the target variable

# Append the four score columns to hair_dryer (index-aligned join).
sentiment_frame = pd.DataFrame(hair_dryer_sentiments)
hair_dryer = hair_dryer.join(sentiment_frame)

# #######################################################################
# 4. Parse the review_date column into real datetimes (time-series analysis).

hair_dryer['review_date'] = pd.to_datetime(hair_dryer['review_date'])

# 5. Use the parsed review_date as the DataFrame index.

hair_dryer.set_index(hair_dryer['review_date'], inplace=True)

# NOTE(review): the triple-quoted block below is abandoned scratch code plus its
# captured output (review counts per weekday, 0=Monday); it is a no-op string
# literal and never executes.
"""

hair_dryer['weekday'] = hair_dryer['review_date'].dt.weekday

df1 = hair_dryer.set_index('review_date')

df1.resample('D').size().sort_values(ascending=False).head(100)

df2 = df1.resample('M').size().to_period()

# df2 = df2.reset_index(df2['review_date'], inplace=True)

print(hair_dryer['weekday'])

weekday

0 1777

1 1702

2 1641

3 1849

4 1531

5 1536

6 1434

"""

# ###################################################################

def getWeek(x):
    """Return the English weekday name for a date-like value.

    Parameters:
        x: any object exposing datetime's weekday() (0=Monday .. 6=Sunday),
           e.g. datetime.date, datetime.datetime, or a pandas Timestamp.

    Returns:
        str: 'Monday' .. 'Sunday'.
    """
    # BUG FIX: the pasted original had its body at column 0 (SyntaxError) and
    # computed tstr/dDay via strftime/split for an abandoned "MM-DD weekday"
    # format without ever using them; that dead code is removed.
    weekNames = ('Monday', 'Tuesday', 'Wednesday', 'Thursday',
                 'Friday', 'Saturday', 'Sunday')
    return weekNames[x.weekday()]

# Derive weekday-name and month columns from review_date.
hair_dryer['weekday'] = hair_dryer['review_date'].apply(getWeek)
# print("weekday_hair_dryer", hair_dryer['weekday'])
weekday_hair_dryer = hair_dryer['weekday']
# hair_dryer = hair_dryer.join(pd.DataFrame(weekday_hair_dryer))

hair_dryer['month'] = pd.to_datetime(hair_dryer['review_date']).dt.month
# print("hair_dryer['month']", hair_dryer['month'])
month_hair_dryer = hair_dryer['month']

# Keep the month under the name 'months' and discard the temporary column.
hair_dryer['months'] = month_hair_dryer
hair_dryer = hair_dryer.drop(columns=['month'])

# #######################################################################################
# Growth of purchase volume over time (reviews per calendar day as a proxy).
hair_dryer['date'] = pd.to_datetime(hair_dryer['review_date']).dt.strftime('%Y-%m-%d')

date_num = hair_dryer['date']

# BUG FIX: the original `date_num.apply(pd.value_counts)` applied value_counts
# to each individual date STRING (counting its characters) and discarded the
# result; the real per-day review counts are computed below.
data_counts = hair_dryer['date'].value_counts()
# print(data_counts)

date_num_describe = date_num.describe()

"""

print(date_num_describe)

count 11470

unique 2307

top 2010-08-05

freq 146

"""

hair_dryer_describe = hair_dryer.describe()
# print(hair_dryer_describe)

# NOTE(review): corr() on a mixed-dtype frame silently keeps only the numeric
# columns on the pandas version this was written for; newer pandas requires
# corr(numeric_only=True) — confirm before upgrading.
hair_dryer_corr = hair_dryer.corr()
# print(hair_dryer_corr)

# data_counts.to_csv('hair_dryer_data_counts.csv')
# hair_dryer.to_csv('hair_dryer_new.csv')

# Manual step: the CSV saved above is edited by hand, then re-read below.

# #################################################################################
# Preprocessing done; reload the hand-edited, preprocessed data set.
hair_dryer_new = pd.read_csv("C:\\Users\\thous\\PycharmProjects\\untitled4\\hair_dryer_new.csv")
# hair_dryer = pd.read_csv("C:\\Users\\thous\\PycharmProjects\\untitled4\\hair_dryer.csv")
# hair_dryer_review = hair_dryer['review_body']

# #################################################################################
# Review count per product id, largest first (input for scatter / BCG-matrix work).
product_counts = hair_dryer_new['product_id'].value_counts()
hair_dryer_productid = product_counts.sort_values(ascending=False)
print("hair_dryer_productid", hair_dryer_productid)

# The top-10 / bottom-10 products are easy to read off here; their mean star
# ratings are matched up externally (Excel, Boston matrix).
# hair_dryer_productid.to_csv('hair_dryer_productid.csv')
# hair_dryer_productid.plot(kind='scatter')
# ValueError: plot kind scatter can only be used for data frames

# #################################################################################

# Count occurrences of a user-specified word in a text file.
# NOTE(review): the triple-quoted block below is scratch code kept as a no-op
# string literal; it never executes.

"""

import sys

File_tuple1 = open(r'english.txt') #打開目標文件

File_tuple2 = File_tuple1.read()

File_tuple1.close()

File_list = File_tuple2.split(' ') #以空格來劃分文件中的單詞

#print(File_list)

x = input('請輸入要查詢的單詞:')

a = 0

i = 0

for i in range(len(File_list)):

if File_list[i]==x:

a+=1

print (x,'在english.txt中出現的次數為',a, '次。')

"""

# ################################################################################################
# Prediction: inspect the distribution of the target variable (star_rating).
# hair_dryer_new = pd.read_csv("C:\\Users\\thous\\PycharmProjects\\untitled4\\hair_dryer_new.csv")

star_rating_hairdryer = hair_dryer_new['star_rating']
# print("star_rating_hairdryer", hair_dryer_new['star_rating'].value_counts())

# BUG FIX: the original called sns.countplot() but seaborn is never imported in
# this script (NameError).  Draw the equivalent count plot with pandas /
# matplotlib, which are already in scope.
star_rating_hairdryer.value_counts().sort_index().plot(kind='bar')
# plt.tick_params(axis='x', labelsize=6)
plt.show()

# Word cloud 1 ###############################################
# BUG FIX: this line was bare (non-comment) text in the paste — a SyntaxError.

# Read the review text; `with` guarantees the file handle is closed
# (the original never closed it).
with open(r"C:\\Users\\thous\\PycharmProjects\\untitled4\\hair_dryer_review.txt",
          'r', encoding='UTF-8') as hair_dryer_review_text:
    hair_dryer_text = hair_dryer_review_text.read()

# Tokenize with jieba and re-join with spaces, as WordCloud expects.
cut_hair_dryer_text = " ".join(jieba.cut(hair_dryer_text))

# Background/mask image for the cloud shape (BGR ndarray; None if file missing).
color_mask_hair_dryer = cv2.imread('mask.jpg')
# coloring=np.array(Image.open("cat_new.jpg"))
# alice_mask = np.array(Image.open(path.join(d, "alice_mask.png")))

cloud = wc(
    # Font path — without one, non-ASCII tokens render as boxes.
    font_path="C:\\Windows\\Fonts\\Times New Roman.TTF",
    # font_path=path.join(d,'simsun.ttc'),
    background_color='white',
    # Cloud shape taken from the mask image.
    mask=color_mask_hair_dryer,
    # Cap on the number of words shown.
    max_words=2000,
    # Largest font size.
    max_font_size=40
)

# BUG FIX: the original called wc().generate(...), building a fresh UNconfigured
# WordCloud and throwing away every setting above; generate with the configured
# instance instead.
wordcloud = cloud.generate(cut_hair_dryer_text)

# BUG FIX: the next snippet was a comment whose continuation line was NOT
# commented, leaving `mask=myimg).fit_words(...)` at top level — a SyntaxError.
# wc = WordCloud(background_color="white", max_words=2000, mask=alice_mask, stopwords=stopwords)
# wordcloud = wd(width=1000, height=860, margin=2, font_path="simsun.ttf",
#                background_color="white", max_font_size=180,
#                mask=myimg).fit_words(wordfrequency)  # cloud from a frequency dict

plt.imshow(wordcloud, interpolation='bilinear')
plt.axis('off')  # hide the axes
plt.show()

總結

以上是生活随笔為你收集整理的2015美国数学建模a代码c语言,美国数学建模比赛2020 MCM C题代码详解的全部內容,希望文章能夠幫你解決所遇到的問題。

如果覺得生活随笔網站內容還不錯,歡迎將生活随笔推薦給好友。