# In[1]: notebook-wide configuration: display options, random seed, figure size.
import numpy as np
import pandas as pd

# Save the default so the final cell can restore it.
PREVIOUS_MAX_ROWS = pd.options.display.max_rows
pd.options.display.max_rows = 25
pd.options.display.max_columns = 20
pd.options.display.max_colwidth = 82
np.random.seed(12345)

import matplotlib.pyplot as plt
plt.rc("figure", figsize=(10, 6))
np.set_printoptions(precision=4, suppress=True)

# In[2]: imports repeated so the chapter can be run standalone.
import numpy as np
import pandas as pd
# In[3]: pandas uses NaN as the default missing-value sentinel for floats.
float_data = pd.Series([1.2, -3.5, np.nan, 0])
float_data

# In[5]: None is also treated as missing; isna() returns a boolean mask.
string_data = pd.Series(["aardvark", np.nan, None, "avocado"])
string_data
string_data.isna()
float_data = pd.Series([1, 2, None], dtype='float64')
float_data
float_data.isna()

# In[6]: Series.dropna() removes missing entries.
data = pd.Series([1, np.nan, 3.5, np.nan, 7])
data.dropna()

# In[8]: DataFrame.dropna() drops any row containing a NaN by default.
data = pd.DataFrame([[1., 6.5, 3.], [1., np.nan, np.nan],
                     [np.nan, np.nan, np.nan], [np.nan, 6.5, 3.]])
data
data.dropna()

# In[10]: how="all" drops only rows/columns that are entirely NaN.
data[4] = np.nan
data
data.dropna(axis="columns", how="all")

# In[11]: thresh=n keeps rows having at least n non-NaN values.
df = pd.DataFrame(np.random.standard_normal((7, 3)))
df.iloc[:4, 1] = np.nan
df.iloc[:2, 2] = np.nan
df
df.dropna()
df.dropna(thresh=2)

# In[13]: fillna with a dict fills each column with a different value.
df.fillna({1: 0.5, 2: 0})

# In[14]: forward-fill propagates the last valid observation downward.
# FIX: fillna(method="ffill") is deprecated since pandas 2.1 (removed in 3.0);
# the dedicated ffill() method is the supported equivalent.
df = pd.DataFrame(np.random.standard_normal((6, 3)))
df.iloc[2:, 1] = np.nan
df.iloc[4:, 2] = np.nan
df
df.ffill()
df.ffill(limit=2)

# In[15]: fill with a statistic such as the mean.
data = pd.Series([1., np.nan, 3.5, np.nan, 7])
data.fillna(data.mean())
# In[16]: a frame with duplicated rows.
data = pd.DataFrame({"k1": ["one", "two"] * 3 + ["two"],
                     "k2": [1, 1, 2, 3, 3, 4, 4]})
data

# In[19]: drop_duplicates keeps the first occurrence by default.
data["v1"] = range(7)
data
data.drop_duplicates(subset=["k1"])

# In[20]: keep="last" retains the final occurrence instead.
data.drop_duplicates(["k1", "k2"], keep="last")

# In[21]: transforming data with a mapping.
data = pd.DataFrame({"food": ["bacon", "pulled pork", "bacon", "pastrami",
                              "corned beef", "bacon", "pastrami", "honey ham",
                              "nova lox"],
                     "ounces": [4, 3, 12, 6, 7.5, 8, 3, 5, 6]})
data

# In[22]: lookup table from food to source animal.
meat_to_animal = {
    "bacon": "pig",
    "pulled pork": "pig",
    "pastrami": "cow",
    "corned beef": "cow",
    "honey ham": "pig",
    "nova lox": "salmon",
}

# In[23]: Series.map applies the dict element-wise.
data["animal"] = data["food"].map(meat_to_animal)
data

# In[24]: map also accepts a function.
def get_animal(x):
    """Return the source animal for the given food name."""
    return meat_to_animal[x]

data["food"].map(get_animal)
# In[25]: sentinel values that should become missing data.
data = pd.Series([1., -999., 2., -999., -1000., 3.])
data

# In[26]: replace a single value.
data.replace(-999, np.nan)

# In[27]: replace several values with one substitute.
data.replace([-999, -1000], np.nan)

# In[28]: parallel lists give a per-value substitute.
data.replace([-999, -1000], [np.nan, 0])

# In[29]: the same thing expressed as a dict.
data.replace({-999: np.nan, -1000: 0})

# In[30]: renaming axis indexes.
data = pd.DataFrame(np.arange(12).reshape((3, 4)),
                    index=["Ohio", "Colorado", "New York"],
                    columns=["one", "two", "three", "four"])

# In[31]: Index.map transforms labels without modifying the frame.
def transform(x):
    """Uppercase the first four characters of a label."""
    return x[:4].upper()

data.index.map(transform)

# In[32]: assign the transformed labels back in place.
data.index = data.index.map(transform)
data

# In[33]: rename returns a relabeled copy.
data.rename(index=str.title, columns=str.upper)

# In[34]: rename with dicts updates only the given labels.
data.rename(index={"OHIO": "INDIANA"}, columns={"three": "peekaboo"})
# In[35]: ages to be discretized into bins.
ages = [20, 22, 25, 27, 21, 23, 37, 31, 61, 45, 41, 32]

# In[36]: pd.cut bins values into half-open intervals (right-closed by default).
bins = [18, 25, 35, 60, 100]
age_categories = pd.cut(ages, bins)
age_categories

# In[37]: inspect the underlying codes and categories.
# FIX: top-level pd.value_counts() is deprecated since pandas 2.1 (removed in
# 3.0); wrap the Categorical in a Series and use the value_counts() method.
age_categories.codes
age_categories.categories
age_categories.categories[0]
pd.Series(age_categories).value_counts()

# In[38]: right=False closes the intervals on the left instead.
pd.cut(ages, bins, right=False)

# In[39]: labels= names the bins.
group_names = ["Youth", "YoungAdult", "MiddleAged", "Senior"]
pd.cut(ages, bins, labels=group_names)
# In[40]: cut into 4 equal-width bins; precision limits decimal digits.
data = np.random.uniform(size=20)
pd.cut(data, 4, precision=2)

# In[41]: qcut bins by sample quantiles, giving equal-sized bins.
# FIX: top-level pd.value_counts() is deprecated since pandas 2.1 (removed in
# 3.0); wrap the Categorical in a Series and use the value_counts() method.
data = np.random.standard_normal(1000)
quartiles = pd.qcut(data, 4, precision=2)
quartiles
pd.Series(quartiles).value_counts()

# In[42]: custom quantile boundaries.
pd.qcut(data, [0, 0.1, 0.5, 0.9, 1.]).value_counts()

# In[43]: detecting and filtering outliers.
data = pd.DataFrame(np.random.standard_normal((1000, 4)))
data.describe()

# In[44]: values exceeding 3 in absolute value in one column.
col = data[2]
col[col.abs() > 3]

# In[45]: rows containing any value exceeding 3 in absolute value.
data[(data.abs() > 3).any(axis="columns")]

# In[46]: cap values outside the interval [-3, 3].
data[data.abs() > 3] = np.sign(data) * 3
data.describe()
# In[48]: a random permutation for reordering rows.
df = pd.DataFrame(np.arange(5 * 7).reshape((5, 7)))
df
sampler = np.random.permutation(5)
sampler

# In[49]: take and iloc both accept the permutation.
df.take(sampler)
df.iloc[sampler]

# In[50]: permuting columns works the same way with axis="columns".
column_sampler = np.random.permutation(7)
column_sampler
df.take(column_sampler, axis="columns")

# In[52]: sampling with replacement allows more draws than rows.
choices = pd.Series([5, 7, -1, 6, 4])
choices.sample(n=10, replace=True)

# In[53]: one-hot / dummy encoding of a categorical column.
df = pd.DataFrame({"key": ["b", "b", "a", "c", "a", "b"],
                   "data1": range(6)})
df
pd.get_dummies(df["key"], dtype=float)

# In[54]: prefix the dummy columns and join them back onto the data.
dummies = pd.get_dummies(df["key"], prefix="key", dtype=float)
df_with_dummy = df[["data1"]].join(dummies)
df_with_dummy
# In[55]: MovieLens movies table; the "::" separator requires the python engine.
mnames = ["movie_id", "title", "genres"]
movies = pd.read_table("datasets/movielens/movies.dat", sep="::",
                       header=None, names=mnames, engine="python")
movies[:10]

# In[56]: one indicator column per genre, splitting on "|".
dummies = movies["genres"].str.get_dummies("|")
dummies.iloc[:10, :6]

# In[57]: attach the indicator columns with a "Genre_" prefix.
movies_windic = movies.join(dummies.add_prefix("Genre_"))
movies_windic.iloc[0]
# In[58]: combining get_dummies with cut.
np.random.seed(12345)  # to make the example repeatable
values = np.random.uniform(size=10)
values
bins = [0, 0.2, 0.4, 0.6, 0.8, 1]
pd.get_dummies(pd.cut(values, bins))

# In[59]: with NumPy dtypes, a None forces an integer Series to float64.
s = pd.Series([1, 2, 3, None])
s
s.dtype

# In[60]: the nullable Int64 extension dtype keeps integers and uses pd.NA.
s = pd.Series([1, 2, 3, None], dtype=pd.Int64Dtype())
s
s.isna()
s.dtype

# In[62]: the string alias "Int64" is shorthand for pd.Int64Dtype().
s = pd.Series([1, 2, 3, None], dtype="Int64")

# In[63]: dedicated extension dtype for strings.
s = pd.Series(['one', 'two', None, 'three'], dtype=pd.StringDtype())
s

# In[64]: converting existing columns to extension dtypes.
df = pd.DataFrame({"A": [1, 2, None, 4],
                   "B": ["one", "two", "three", None],
                   "C": [False, None, False, True]})
df
df["A"] = df["A"].astype("Int64")
df["B"] = df["B"].astype("string")
df["C"] = df["C"].astype("boolean")
df
# In[65]: splitting on a delimiter.
val = "a,b, guido"
val.split(",")

# In[66]: strip surrounding whitespace from each piece.
pieces = [x.strip() for x in val.split(",")]
pieces

# In[67]: joining pieces by concatenation.
first, second, third = pieces
first + "::" + second + "::" + third

# In[69]: substring tests; index raises on a miss while find returns -1.
"guido" in val
val.index(",")
val.find(":")

# In[72]: replace substitutes (or, with "", deletes) occurrences.
val.replace(",", "::")
val.replace(",", "")
# In[73]: regular expressions: split on runs of whitespace.
import re
text = "foo bar\t baz \tqux"
re.split(r"\s+", text)

# In[74]: compile a pattern once for reuse.
regex = re.compile(r"\s+")
regex.split(text)

# In[76]: a simple e-mail address pattern.
text = """Dave dave@google.com Steve steve@gmail.com Rob rob@gmail.com Ryan ryan@yahoo.com"""
pattern = r"[A-Z0-9._%+-]+@[A-Z0-9.-]+\.[A-Z]{2,4}"
# re.IGNORECASE makes the regex case insensitive
regex = re.compile(pattern, flags=re.IGNORECASE)

# In[78]: search returns only the first match in the text.
m = regex.search(text)
m
text[m.start():m.end()]

# In[80]: sub replaces every match.
print(regex.sub("REDACTED", text))

# In[81]: capture groups split each address into user, domain and suffix.
pattern = r"([A-Z0-9._%+-]+)@([A-Z0-9.-]+)\.([A-Z]{2,4})"
regex = re.compile(pattern, flags=re.IGNORECASE)

# In[82]: match anchors at the beginning of the string.
m = regex.match("wesm@bright.net")
m.groups()

# In[84]: \1, \2, \3 reference the captured groups in the replacement.
print(regex.sub(r"Username: \1, Domain: \2, Suffix: \3", text))
# In[85]: a Series of strings containing a missing value.
data = {"Dave": "dave@google.com", "Steve": "steve@gmail.com",
        "Rob": "rob@gmail.com", "Wes": np.nan}
data = pd.Series(data)
data
data.isna()

# In[86]: str methods skip NA values rather than raising.
data.str.contains("gmail")

# In[87]: with the string extension dtype the result uses boolean dtype.
data_as_string_ext = data.astype('string')
data_as_string_ext
data_as_string_ext.str.contains("gmail")

# In[88]: vectorized regex matching with findall.
pattern = r"([A-Z0-9._%+-]+)@([A-Z0-9.-]+)\.([A-Z]{2,4})"
data.str.findall(pattern, flags=re.IGNORECASE)

# In[89]: take the first match per element and pull out one group.
matches = data.str.findall(pattern, flags=re.IGNORECASE).str[0]
matches
matches.str.get(1)

# In[91]: str.extract returns one DataFrame column per capture group.
data.str.extract(pattern, flags=re.IGNORECASE)
# In[92]: repeated string values are a natural fit for categoricals.
# FIX: top-level pd.value_counts() is deprecated since pandas 2.1 (removed in
# 3.0); the Series method value_counts() is the supported equivalent.
values = pd.Series(['apple', 'orange', 'apple', 'apple'] * 2)
values
pd.unique(values)
values.value_counts()

# In[93]: dimension-table representation: integer codes plus a lookup table.
values = pd.Series([0, 1, 0, 0] * 2)
dim = pd.Series(['apple', 'orange'])
values
dim

# In[95]: example frame with a low-cardinality string column.
fruits = ['apple', 'orange', 'apple', 'apple'] * 2
N = len(fruits)
rng = np.random.default_rng(seed=12345)
df = pd.DataFrame({'fruit': fruits,
                   'basket_id': np.arange(N),
                   'count': rng.integers(3, 15, size=N),
                   'weight': rng.uniform(0, 4, size=N)},
                  columns=['basket_id', 'fruit', 'count', 'weight'])
df

# In[96]: convert the string column to categorical dtype.
fruit_cat = df['fruit'].astype('category')
fruit_cat

# In[97]: the backing array is a pandas Categorical.
c = fruit_cat.array
type(c)

# In[99]: map integer codes to category values.
dict(enumerate(c.categories))

# In[100]: assign the converted column back onto the frame.
df['fruit'] = df['fruit'].astype('category')
df["fruit"]

# In[101]: constructing a Categorical directly from values.
my_categories = pd.Categorical(['foo', 'bar', 'baz', 'foo', 'bar'])
my_categories

# In[102]: from_codes when codes and categories are already known.
categories = ['foo', 'bar', 'baz']
codes = [0, 1, 2, 0, 0, 1]
my_cats_2 = pd.Categorical.from_codes(codes, categories)
my_cats_2

# In[103]: ordered=True gives the categories a meaningful ordering.
ordered_cat = pd.Categorical.from_codes(codes, categories, ordered=True)
ordered_cat
# In[105]: reproducible draws from a seeded generator.
rng = np.random.default_rng(seed=12345)
draws = rng.standard_normal(1000)
draws[:5]

# In[106]: qcut yields interval-valued categories.
bins = pd.qcut(draws, 4)
bins

# In[107]: quartile labels are easier to read than raw intervals.
bins = pd.qcut(draws, 4, labels=['Q1', 'Q2', 'Q3', 'Q4'])
bins
bins.codes[:10]

# In[108]: summary statistics per quartile via groupby.
bins = pd.Series(bins, name='quartile')
results = (pd.Series(draws)
           .groupby(bins)
           .agg(['count', 'min', 'max'])
           .reset_index())
results
In [110]:
N = 10_000_000 labels = pd.Series(['foo', 'bar', 'baz', 'qux'] * (N // 4))
In [111]:
categories = labels.astype('category')
In [112]:
labels.memory_usage(deep=True) categories.memory_usage(deep=True)
In [113]:
%time _ = labels.astype('category')
In [114]:
%timeit labels.value_counts() %timeit categories.value_counts()
# In[115]: categorical methods live under the .cat accessor.
s = pd.Series(['a', 'b', 'c', 'd'] * 2)
cat_s = s.astype('category')
cat_s

# In[116]: inspect codes and categories.
cat_s.cat.codes
cat_s.cat.categories

# In[117]: set_categories can declare categories beyond those observed.
actual_categories = ['a', 'b', 'c', 'd', 'e']
cat_s2 = cat_s.cat.set_categories(actual_categories)
cat_s2

# In[118]: unobserved categories appear in counts with zeros.
cat_s.value_counts()
cat_s2.value_counts()

# In[119]: remove_unused_categories drops categories with no data.
cat_s3 = cat_s[cat_s.isin(['a', 'b'])]
cat_s3
cat_s3.cat.remove_unused_categories()

# In[120]: a categorical Series built directly via dtype='category'.
cat_s = pd.Series(['a', 'b', 'c', 'd'] * 2, dtype='category')

# In[121]: one-hot encode a categorical Series.
pd.get_dummies(cat_s, dtype=float)
# In[123]: restore the display setting saved in the first cell.
pd.set_option("display.max_rows", PREVIOUS_MAX_ROWS)
RetroSearch is an open source project built by @garambo | Open a GitHub Issue
Search and Browse the WWW like it's 1997 | Search results from DuckDuckGo
HTML:
3.2
| Encoding:
UTF-8
| Version:
0.7.4