数据分析编程:SQL,Python or SPL?

news/2024/11/19 2:21:51/

Talk is cheap. Let’s show the code

1. 计算用户会话次数

用户行为数据表

useridaction_typeaction_time
U1059login2023-12-01 18:00:10
U1092login2023-12-01 18:00:17
U1069login2023-12-01 18:00:22

10 分钟没有任何动作或退出后 5 分钟没有登录则认为会话结束,计算各用户的会话次数。

SPL

A
1=file("session_data.csv").import@tc()
2=A1.group(userid;~.group@i((action_type[-1]=="exit"&&interval@s(action_time[-1],action_time)>300)||(interval@s(action_time[-1],action_time)>600)).len():session_num)

SQL

-- Count sessions per user: a new session starts after a gap > 600 s,
-- or > 300 s when the previous action was an explicit 'exit'.
-- NOTE(review): (action_time - prev_time) > 600 assumes the subtraction
-- yields seconds (epoch-style timestamps) — confirm against the schema.
WITH login_data AS (
    SELECT userid, action_type, action_time,
           LAG(action_time) OVER (PARTITION BY userid ORDER BY action_time) AS prev_time,
           LAG(action_type) OVER (PARTITION BY userid ORDER BY action_time) AS prev_action
    FROM session_data
)
SELECT userid, COUNT(*) AS session_count
FROM (
    SELECT userid, action_type, action_time, prev_time, prev_action,
           CASE
               WHEN prev_time IS NULL
                    OR (action_time - prev_time) > 600
                    OR (prev_action = 'exit' AND (action_time - prev_time) > 300)
               THEN 1
               ELSE 0
           END AS is_new_session
    FROM login_data
) t
WHERE is_new_session = 1
GROUP BY userid;

Python

import pandas as pd


def count_sessions(login_data):
    """Return a DataFrame (UID, session_count) with the session count per user.

    A session ends after 10 minutes (600 s) without any action, or 5 minutes
    (300 s) without a new action following an 'exit' action.
    """
    session_count = {}
    for uid, sub_df in login_data.groupby("userid"):
        # Sort defensively: the gap rule is defined on chronological order.
        sub_df = sub_df.sort_values("action_time")
        times = list(sub_df["action_time"])
        actions = list(sub_df["action_type"])
        sessions = 1  # a user with at least one action has at least one session
        # Compare each action with the PREVIOUS action (not the session start),
        # matching the "10 minutes without any action" rule and the SQL version.
        for prev_t, prev_a, cur_t in zip(times, actions, times[1:]):
            # .total_seconds(), not .seconds — .seconds silently drops whole days
            gap = (cur_t - prev_t).total_seconds()
            if gap > 600 or (prev_a == "exit" and gap > 300):
                sessions += 1
        session_count[uid] = sessions
    return pd.DataFrame(list(session_count.items()),
                        columns=["UID", "session_count"])


if __name__ == "__main__":
    login_data = pd.read_csv("session_data.csv")
    login_data["action_time"] = pd.to_datetime(login_data["action_time"])
    session_cnt = count_sessions(login_data)

2. 1 分钟内连续得分 3 次的球员

球赛得分表

teamplayerplay_timescore
AA32023-12-31 09:00:092
BB12023-12-31 09:00:243
AA52023-12-31 09:00:572

SPL

A
1=file("ball_game.csv").import@tc()
2=A1.group@o(player).select(~.len()>2&&(~.pselect(#>2&&interval@s(play_time[-2],play_time)<60))).(player)

A2:group@o(),@o 选项是按顺序把相同球员分成一组,球员不同开始新的分组,然后筛选其中连续得分次数大于等于 3 次且有任意一个连续三次得分时间间隔小于 60 的分组,最后取出球员。

SQL

-- Players with 3 globally-consecutive scoring rows (same player, no one in
-- between, guaranteed by player-equality on consecutive rn) within 60 seconds.
-- NOTE(review): (s3.play_time - s1.play_time) < 60 assumes the subtraction
-- yields seconds — in Oracle DATE arithmetic it yields days; confirm.
WITH numbered_scores AS (
    SELECT team, player, play_time, score,
           ROW_NUMBER() OVER (ORDER BY play_time) AS rn
    FROM ball_game
)
SELECT DISTINCT s1.player
FROM numbered_scores s1
JOIN numbered_scores s2 ON s1.player = s2.player AND s1.rn = s2.rn - 1
JOIN numbered_scores s3 ON s1.player = s3.player AND s1.rn = s3.rn - 2
WHERE (s3.play_time - s1.play_time) < 60;

Python

import pandas as pd


def players_with_3_quick_scores(df):
    """Return players who scored 3 times in a row (no other player's score in
    between) within 60 seconds. Rows are assumed ordered by play_time."""
    found = set()
    streak = 0
    current_player = None
    # Scan EVERY row: the original range(len(df) - 2) skipped the last two
    # rows, so streaks ending at the tail of the table were missed.
    for i in range(len(df)):
        row = df.iloc[i]
        if row["player"] == current_player:
            streak += 1
        else:
            current_player = row["player"]
            streak = 1
        if streak >= 3:
            first = df.iloc[i - 2]  # first score of the latest 3-score run
            # .total_seconds(): .seconds ignores the day component of the gap
            if (row["play_time"] - first["play_time"]).total_seconds() < 60:
                found.add(current_player)
    return list(found)


if __name__ == "__main__":
    df = pd.read_csv("ball_game.csv")
    df["play_time"] = pd.to_datetime(df["play_time"])
    result_players = players_with_3_quick_scores(df)

3. 每 7 天中连续三天活跃的用户数

用户登录表

iduseridts
14662017-01-07 18:24:55
24582017-01-07 18:25:18
34582017-01-07 18:26:21

SPL

A
1=file("login_data.csv").import@tc()
2=periods(date(A1.ts),date(A1.m(-1).ts))
3=A1.group(userid).(~.align(A2,date(ts)).(if(#<7,null,(cnt=~[-6:0].group@i(!~).max(count(~)),if(cnt>=3,1,0)))))
4=msum(A3).~.new(A2(#):dt,int(~):cont3_num).to(7,)

SQL

-- For each date from day 7 on, count users with >= 3 consecutive active days
-- inside the trailing 7-day window (Oracle dialect: TRUNC/NVL/ROWNUM).
WITH all_dates AS (
    SELECT DISTINCT TRUNC(ts) AS login_date
    FROM login_data
),
user_login_counts AS (
    -- 1 if the user logged in at least once that day, else absent
    SELECT userid, TRUNC(ts) AS login_date,
           (CASE WHEN COUNT(*) >= 1 THEN 1 ELSE 0 END) AS login_count
    FROM login_data
    GROUP BY userid, TRUNC(ts)
),
whether_login AS (
    -- dense user x date grid with 0/1 activity flags
    SELECT u.userid, ad.login_date, NVL(ulc.login_count, 0) AS login_count
    FROM all_dates ad
    CROSS JOIN (SELECT DISTINCT userid FROM login_data) u
    LEFT JOIN user_login_counts ulc
           ON u.userid = ulc.userid AND ad.login_date = ulc.login_date
    ORDER BY u.userid, ad.login_date
),
whether_login_rn AS (
    SELECT userid, login_date, login_count, ROWNUM AS rn
    FROM whether_login
),
whether_eq AS (
    -- wether_e = 1 marks the start of a new run of consecutive active days
    SELECT userid, login_date, login_count, rn,
           (CASE WHEN LAG(login_count, 1) OVER (ORDER BY rn) = login_count
                      AND login_count = 1
                      AND LAG(userid, 1) OVER (ORDER BY rn) = userid
                 THEN 0 ELSE 1 END) AS wether_e
    FROM whether_login_rn
),
numbered_sequence AS (
    -- running sum labels each run with a distinct id
    SELECT userid, login_date, login_count, rn, wether_e,
           SUM(wether_e) OVER (ORDER BY rn) AS lab
    FROM whether_eq
),
consecutive_logins_num AS (
    -- cnt = 1 when the longest run inside the trailing 7-row window >= 3
    SELECT userid, login_date, login_count, rn, wether_e, lab,
           (SELECT (CASE WHEN MAX(COUNT(*)) < 3 THEN 0 ELSE 1 END)
            FROM numbered_sequence b
            WHERE b.rn BETWEEN a.rn - 6 AND a.rn
              AND b.userid = a.userid
            GROUP BY b.lab) AS cnt
    FROM numbered_sequence a
)
SELECT login_date, SUM(cnt) AS cont3_num
FROM consecutive_logins_num
WHERE login_date >= (SELECT MIN(login_date) FROM all_dates) + 6
GROUP BY login_date
ORDER BY login_date;

Python

import numpy as np
import pandas as pd


def count_3day_active(df):
    """For every date from the 7th day on, count users who logged in on at
    least 3 consecutive days within the trailing 7-day window.

    Returns a DataFrame with columns dt (Timestamp) and cont3_num (int).
    """
    work = df.copy()
    # Normalize to midnight Timestamps. The original converted to
    # datetime.date and then reindexed against a DatetimeIndex, which never
    # matches (date objects hash differently from Timestamps) and produced
    # all-NaN rows.
    work["ts"] = pd.to_datetime(work["ts"]).dt.normalize()
    # One calendar slot per day of the observed span, shared by all users.
    all_days = pd.date_range(work["ts"].min(), work["ts"].max(), freq="D")
    per_user_flags = []
    for _, grp in work.groupby("userid"):
        active = (
            pd.Series(1, index=grp["ts"].drop_duplicates())
            .reindex(all_days, fill_value=0)
            .to_numpy()
        )
        flags = []
        for end in range(6, len(active)):
            window = active[end - 6:end + 1]
            # longest run of consecutive active days inside the window
            longest = run = 0
            for v in window:
                run = run + 1 if v else 0
                longest = max(longest, run)
            flags.append(1 if longest >= 3 else 0)
        per_user_flags.append(flags)
    totals = np.array(per_user_flags).sum(axis=0)
    return pd.DataFrame({"dt": all_days[6:], "cont3_num": totals})


if __name__ == "__main__":
    df = pd.read_csv("login_data.csv")
    result = count_3day_active(df)

4. 每天新用户的次日留存率

用户登录表

iduseridts
14662017-01-07 18:24:55
24582017-01-07 18:25:18
34582017-01-07 18:26:21

SPL

A
1=file("login_data.csv").import@tc()
2=A1.group(userid;fst=date(ts):fst_login,~.(date(ts)).pos(fst+1)>0:wether_sec_login)
3=A2.groups(fst_login+1:dt;count(wether_sec_login)/count(1):ret_rate)

A2:按用户分组,记录首次登录日期并查看第二天是否登录
A3:按第二天登录日期统计次日留存率

SQL

-- Next-day retention per first-login cohort (Oracle dialect).
WITH first_login AS (
    SELECT userid, MIN(TRUNC(ts)) AS first_login_date
    FROM login_data
    GROUP BY userid
),
next_day_login AS (
    -- users who logged in again exactly one day after their first login
    SELECT DISTINCT fl.userid, fl.first_login_date, TRUNC(ld.ts) AS next_day_login_date
    FROM first_login fl
    LEFT JOIN login_data ld ON fl.userid = ld.userid
    WHERE TRUNC(ld.ts) = fl.first_login_date + 1
),
day_new_users AS (
    SELECT first_login_date, COUNT(*) AS new_user_num
    FROM first_login
    GROUP BY first_login_date
),
next_new_users AS (
    SELECT next_day_login_date, COUNT(*) AS next_user_num
    FROM next_day_login
    GROUP BY next_day_login_date
),
all_date AS (
    SELECT DISTINCT TRUNC(ts) AS login_date
    FROM login_data
)
SELECT all_date.login_date + 1 AS dt,
       dn.new_user_num,
       nn.next_user_num,
       (CASE WHEN nn.next_day_login_date IS NULL THEN 0
             ELSE nn.next_user_num END) / dn.new_user_num AS ret_rate
FROM all_date
JOIN day_new_users dn ON all_date.login_date = dn.first_login_date
LEFT JOIN next_new_users nn ON dn.first_login_date + 1 = nn.next_day_login_date
ORDER BY all_date.login_date;

Python

from datetime import timedelta

import pandas as pd


def next_day_retention(df):
    """Next-day retention per cohort: for each first-login date, the share of
    new users who logged in again on the following day.

    Returns a Series indexed by sec_dt (the day after the first login).
    """
    work = df.copy()
    work["ts"] = pd.to_datetime(work["ts"]).dt.date
    rows = []
    for uid, g in work.groupby("userid"):
        login_days = set(g["ts"])
        # min(), not iloc[0]: the input is not guaranteed to be sorted by ts.
        first_day = min(login_days)
        # datetime.timedelta keeps the result a plain date; the original's
        # date + pd.Timedelta promoted to Timestamp, so the membership test
        # against date objects could never succeed.
        second_day = first_day + timedelta(days=1)
        rows.append([uid, first_day, second_day, second_day in login_days])
    per_user = pd.DataFrame(
        rows, columns=["userid", "fst_dt", "sec_dt", "wether_sec_login"])
    # mean of booleans == retained / cohort size
    return per_user.groupby("sec_dt")["wether_sec_login"].mean()


if __name__ == "__main__":
    df = pd.read_csv("login_data.csv")
    result = next_day_retention(df)

5. 股价高于前后 5 天时当天的涨幅

股价信息表

STOCKIDDATECLOSING
622015-01-058.91
622015-01-068.31
622015-01-077.6

SPL

A
1=file("STOCK.csv").import@tc()
2=lth=A1.len(),A1.pselect@a(#>4&&#<=A1.len()-4&&CLOSING>max(CLOSING[-4:-1])&&CLOSING>max(CLOSING[1:4]))
3=A1.calc(A2,CLOSING/CLOSING[-1]-1)

SQL

-- Daily gain on days whose close beats the previous 5 and next 5 closes
-- (Oracle dialect: ROWNUM).
SELECT closing / closing_pre - 1 AS raise
FROM (
    SELECT dt, closing, ROWNUM AS rn,
           MAX(closing) OVER (ORDER BY dt ROWS BETWEEN 5 PRECEDING AND 1 PRECEDING) AS max_pre,
           MAX(closing) OVER (ORDER BY dt ROWS BETWEEN 1 FOLLOWING AND 5 FOLLOWING) AS max_suf,
           LAG(closing, 1) OVER (ORDER BY dt) AS closing_pre
    FROM stock
)
WHERE rn > 5 AND rn <= (SELECT COUNT(*) FROM stock) - 5
  AND closing > max_pre
  AND closing > max_suf;

Python

import pandas as pd


def local_peak_increases(stock_df):
    """Return the daily percentage gains on days whose closing price beats
    both the previous 5 and the next 5 closing prices.

    Rows are assumed ordered by date.
    """
    # reset_index + .iloc: the original relied on label-based Series slicing,
    # which only behaves positionally on a default RangeIndex.
    closing = stock_df["CLOSING"].reset_index(drop=True)
    gains = []
    # The first/last 5 rows lack a full 5-day neighbourhood and are skipped.
    for i in range(5, len(closing) - 5):
        today = closing.iloc[i]
        if (today > closing.iloc[i - 5:i].max()
                and today > closing.iloc[i + 1:i + 6].max()):
            gains.append(today / closing.iloc[i - 1] - 1)
    return gains


if __name__ == "__main__":
    stock_price_df = pd.read_csv("STOCK.csv")
    result = local_peak_increases(stock_price_df)

到底好不好用下载试试就知道了~~免费下载试用


http://www.ppmy.cn/news/1548127.html

相关文章

安全见闻2

声明! 学习视频来自B站up主 泷羽sec 有兴趣的师傅可以关注一下,如涉及侵权马上删除文章,笔记只是方便各位师傅的学习和探讨,文章所提到的网站以及内容,只做学习交流,其他均与本人以及泷羽sec团队无关…

JS学习日记(jQuery库)

前言 今天先更新jQuery库的介绍,它是一个用来帮助快速开发的工具 介绍 jQuery是一个快速,小型且功能丰富的JavaScript库,jQuery设计宗旨是“write less,do more”,即倡导写更少的代码,做更多的事…

怎么用家用电脑做服务器(web服务器、ftp服务器、小程序服务器,云电脑)

原料: 1、家用电脑,是电脑就行 2、宽带,这个有要求哦,必须是官方宽带,北乔峰,南慕容,北联通南电信,什么长城宽带等等地方小帮派,都没有朝廷的公网IP,没法直接…

java 读取 有时需要sc.nextLine();读取换行符 有时不需要sc.nextLine();读取换行符 详解

在 Java 中,使用 Scanner 类读取输入时,换行符的处理行为取决于所用的读取方法。不同方法的工作原理会影响是否需要额外调用 sc.nextLine() 来清理缓冲区中的换行符。 核心问题 根本原因:Scanner 是基于输入流工作的,而换行符…

前端无感刷新token

摘要: Axios 无感知刷新令牌是一种在前端应用中实现自动刷新访问令牌(access token)的技术,确保用户在进行 API 请求时不会因为令牌过期而中断操作 目录概览 XMLHttpRequestAxiosFetch APIJQuni.request注意事项: 访问…

比较TCP/IP和OSI/RM的区别

一、结构不同 1、OSI:OSI划分为7层结构:物理层、数据链路层、网络层、传输层、会话层、表示层和应用层。 2、TCP/IP:TCP/IP划分为4层结构:应用层、传输层、互联网络层和主机-网络层。 二、性质不同 1、OSI:OSI是制定…

【蓝桥等考C++真题】蓝桥杯等级考试C++组第13级L13真题原题(含答案)-统计数字

CL13 统计数字(50 分) 一场歌唱比赛有不超过 26 位选手参加,选手的代号依次用大写字母 A、B、C……表示。在节目现场观众要给这些选手投票,每人投一票,选出最喜欢的选手。已知观众的投票记录,请将所有选手的得票数从高到低进行排…

Spring Boot框架:电商系统的技术优势

摘 要 现代经济快节奏发展以及不断完善升级的信息化技术,让传统数据信息的管理升级为软件存储,归纳,集中处理数据信息的管理方式。本网上商城系统就是在这样的大环境下诞生,其可以帮助管理者在短时间内处理完毕庞大的数据信息…